Column           Type             Length / values
query            stringlengths    9 to 3.4k
document         stringlengths    9 to 87.4k
metadata         dict
negatives        sequencelengths  4 to 101
negative_scores  sequencelengths  4 to 101
document_score   stringlengths    3 to 10
document_rank    stringclasses    102 values
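Each row below pairs a natural-language query with one positive code document and a list of mined negative snippets scored for similarity, matching the triplet objective declared in the metadata field. A minimal sketch of iterating over rows with this schema via the Hugging Face `datasets` library follows; the repository path is a hypothetical placeholder, since the preview does not name the dataset.

```python
# Sketch only: "user/code-retrieval-triplets" is a hypothetical placeholder path,
# not the actual repository name for this dataset.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")

for row in ds.select(range(3)):
    query = row["query"]              # short description of the test's intent
    positive = row["document"]        # the matching code snippet
    negatives = row["negatives"]      # 4 to 101 non-matching snippets
    scores = row["negative_scores"]   # one similarity score per negative
    print(query, row["document_score"], row["document_rank"], len(negatives))
```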
Ensure encoder produces expected encoded output.
def test_encoder(self):
    from sosbeacon.utils import number_encode

    number = 123
    encoded = number_encode(number)
    self.assertEqual(encoded, 'b6')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_encode(self):\n pass # TODO(tlarsen)", "def _define_encoder(self):\n raise NotImplementedError", "def test_encoders(encoder):\n assert encoding.decode(None, encoder) is None\n assert encoding.encode(None, encoder) is None\n\n assert b\"\" == encoding.decode(b\"\", encoder)\n\n assert b\"string\" == encoding.decode(\n encoding.encode(\n b\"string\",\n encoder\n ),\n encoder\n )\n\n with pytest.raises(TypeError):\n encoding.encode(\"string\", encoder)\n\n with pytest.raises(TypeError):\n encoding.decode(\"string\", encoder)\n with pytest.raises(ValueError):\n encoding.decode(b\"foobar\", encoder)", "def encode(self):\n \n assert False, \"Not implemented.\"", "def test_encode():", "def testForceEncodeValueError(self):\n test_cases = [\n ('aaabbb', '3a 3b'),\n ('\\n\\n\\n', '3\\n'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]), '1001a 909b 65c 2d'),\n ('aaaa1111\\nbbbb2222', '4a 41 1\\n 4b 42'),\n ]\n for data, expected in test_cases:\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.STRING,\n data,\n )\n self.assertEqual(obj.data, data)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n obj.encode()\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n obj.encode(True)\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertTrue(obj.pseudo_encode)", "def encode(self): # pragma: no cover\n pass", "def encoder(self, inputs):\n pass", "def testEncode(self):\n test_cases = [\n ('\\n', '1\\n'),\n (' ', '1 '),\n ('aaabbb', '3a 3b'),\n ('a b', '1a 1 1b'),\n ('\\n\\n\\n', '3\\n'),\n ('122333', '11 22 33'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]), '1001a 909b 65c 2d'),\n ]\n for data, expected in test_cases:\n encoded_result = ASCIITransportFormat.encode_data(data)\n self.assertEqual(encoded_result, expected)", "def has_encoder(self):\n return False if self.encoder is None else True", "def test_encoders_strings(encoder):\n assert \"\" == encoding.decode(b\"\", encoder)\n\n assert \"string\" == encoding.decode(\n encoding.encode(\n \"string\",\n encoder\n ),\n encoder\n )\n\n with pytest.raises(TypeError):\n encoding.encode(b\"string\", encoder)\n\n with pytest.raises(TypeError):\n encoding.decode(\"foobar\", encoder)", "def registerEncoder (encoder):\n assert False, \"TODO:\"", "def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)", "def encode(self, decoded):", "def test_encode(self):\n for (input, output) in self.tests:\n self.assertEqual(input.encode('imap4-utf-7'), output)", "def test_encode(self):\n assert url_encoder.encode(1) == 'TheStakeOut'\n assert url_encoder.encode(800) == 'TheStockTip-TheSeven'\n assert url_encoder.encode(99999) == 'MaleUnbonding-TheConversion-TheAndreaDoria'", "def testNoForceEncodeValueError(self):\n test_cases = [\n ('aaabbb', '3a 3b'),\n ('\\n\\n\\n', '3\\n'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 
'd'*2]), '1001a 909b 65c 2d'),\n ('aaaa1111\\nbbbb2222', '4a 41 1\\n 4b 42'),\n ]\n for data, expected in test_cases:\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.STRING,\n data,\n )\n self.assertEqual(obj.data, data)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n obj.encode()\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n with self.assertRaises(ValueError):\n obj.encode()\n\n self.assertEqual(obj.data, expected)\n self.assertTrue(obj.encoded)\n self.assertFalse(obj.pseudo_encode)", "def test_encoder():\r\n #Check edge cases first\r\n assert encode_morse(123) == \"Plaintext is not a string!\", \"Test 1 failed, input integer 123\"\r\n assert encode_morse(\"\") == \"\", \"Test 2 failed, input ''\"\r\n assert encode_morse(\"^\") == \"ERROR: You can't encode the following character: ^\", \"Test 3 failed, input '^'\"\r\n assert encode_morse(\" e e \") == \". / .\", \"Test 4 failed, input ' e e '\"\r\n assert encode_morse(\"AbCd\") == \".- -... -.-. -..\", \"Test 5 failed, input 'AbCd'\"\r\n \r\n #Now we run possible plaintexts and check their corresponding ciphertexts\r\n assert encode_morse(\"the quick brown fox jumps over the lazy dog\") == \"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.\", \"Test 6 failed, input 'the quick brown fox jumps over the lazy dog'\"\r\n assert encode_morse(\"H1er0ph@nT + '\") == \".... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\", \"Test 7 failed, input 'H1er0ph@nT + ''\"\r\n assert encode_morse('\"' + \"'\") == \".-..-. .----.\", \"Test 8 failed, input ''(double apostrophe)' + '(single apostrophe)'\"\r\n \r\n #Check that input not mutated\r\n test_plaintext_9 = \"test\"\r\n encode_morse(test_plaintext_9)\r\n assert test_plaintext_9 == \"test\", \"Test 9 failed, input 'test' mutated\"\r\n \r\n #If all tests passed\r\n print (\"Congratulations! 9/9 tests passed!\")", "def test_Encoder_encode_decode_nack(self):\n interest = Interest(\"/data/test\")\n n = Nack(\"/data/test\", NackReason.NO_CONTENT, interest=interest)\n en = self.encoder1.encode(n)\n dn = self.encoder1.decode(en)\n self.assertTrue(n == dn)", "def encoder_test(\n encoder,\n input_data,\n regularizer,\n dropout_rate,\n output_dtype,\n output_shape,\n output_data=None,\n):\n tf.reset_default_graph()\n\n # Run the encoder\n input_data = tf.convert_to_tensor(input_data)\n dropout_rate = tf.convert_to_tensor(dropout_rate)\n is_training = tf.convert_to_tensor(False)\n\n hidden, _ = encoder(\n input_data,\n regularizer,\n dropout_rate,\n is_training=is_training\n )\n\n # Check output shape and type\n assert hidden.dtype == output_dtype\n assert hidden.shape.as_list() == output_shape\n\n if output_data is not None:\n # TODO the hidden output is actually a tensor. 
May need modification\n assert np.allclose(hidden, output_data)", "def argparse_encoder_validate(encoder: str) -> str:\n if encoder not in AVAILABLE_ENCODERS:\n raise ArgumentTypeError(\"%s is an invalid encoder.\" % encoder)\n return encoder", "def test_verify(self):\n self.encoder._verify = True\n self.assertTrue(self.encoder._verify)", "def test_Encoder_encode_decode_content(self):\n c = Content(\"/data/test\", \"HelloWorld\")\n ec = self.encoder1.encode(c)\n dc = self.encoder1.decode(ec)\n self.assertTrue(c == dc)", "def test_init(self):\n default_encoder_type = type(Encoder())\n\n payload = Payload()\n self.assertIsInstance(payload.encoder, default_encoder_type)\n\n json_encoder = JSONEncoder()\n payload = Payload(encoder=json_encoder)\n self.assertEqual(payload.encoder, json_encoder)", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def testEncodeDecodeEmpty(self):\n test_case = ''\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.STRING,\n test_case,\n )\n self.assertEqual(obj.data, test_case)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n obj.encode()\n self.assertEqual(obj.data, test_case)\n self.assertTrue(obj.encoded)\n self.assertTrue(obj.pseudo_encode)\n\n obj.decode()\n self.assertEqual(obj.data, test_case)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)", "def test_encoder(lambda_module):\n\n encoder = lambda_module.Encoder()\n\n assert isinstance(encoder.default(decimal.Decimal(10.5)), float)\n assert isinstance(encoder.default(decimal.Decimal(10)), int)\n assert isinstance(encoder.default(datetime.datetime.now()), str)", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def testEncodeDecodeEmpty(self):\n test_case = ''\n string_obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.STRING,\n test_case,\n )\n json_obj = string_obj.json()\n obj = ASCIITransportFormat(\n 
ASCIITransportFormat.SupportedTypes.JSON,\n json_obj,\n )\n self.assertEqual(obj.data, test_case)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)\n\n json_obj = obj.json()\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.JSON,\n json_obj,\n )\n obj.encode()\n self.assertEqual(obj.data, test_case)\n self.assertTrue(obj.encoded)\n self.assertTrue(obj.pseudo_encode)\n\n json_obj = obj.json()\n obj = ASCIITransportFormat(\n ASCIITransportFormat.SupportedTypes.JSON,\n json_obj,\n )\n obj.decode()\n self.assertEqual(obj.data, test_case)\n self.assertFalse(obj.encoded)\n self.assertFalse(obj.pseudo_encode)", "def testEncodeDecodeEmpty(self):\n empty_case = ''\n encoded_result = ASCIITransportFormat.encode_data(empty_case)\n decoded_result = ASCIITransportFormat.decode_data(encoded_result)\n self.assertEqual(decoded_result, empty_case)", "def check_encoder(self):\n file = Path(self.get_encoder_path() + \".data-00000-of-00001\")\n\n return file.exists()", "def testEncodeEmpty(self):\n empty_case = ''\n encoded_result = ASCIITransportFormat.encode_data(empty_case)\n self.assertEqual(encoded_result, empty_case)", "def test_encrypt_encoding(self):\n encrypted = encrypt('message')\n\n assert encrypted\n assert encrypted != 'message'\n assert type(encrypted) == str", "def handle_encode(self, results):\n \n config.COD_PROMPT = config.ENC_PROMPT\n print config.ENC_PROMPT + \" encoding results...\"\n \n # while there is another decoder, run each item through the next decoder\n data = results\n success = False\n for encoder in self.encoder_list:\n current_encoder = encoder()\n full_body = getattr(current_encoder,'full_body_encode',False)\n success, data = self.recursive_decoder(current_encoder.encode, data, full_body)\n if not success:\n break\n print config.ENC_PROMPT + \"%s encoded to '%s'\" % ( current_encoder.name(),data)\n return success, data", "def test_encode():\n enig = Enigma(534, 16, 8, [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n string = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. 
Just\nfor a while.\"\"\"\n encoded = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n\n assert_equal(encoded, enig.encode(string))\n\n endsettings = [5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(endsettings, enig.getrotsettings())", "def rivine_binary_encode(self, encoder):\n pass", "def get_encoded(self):\n pass", "def test_encodeWithErrors(self):\n text = u'Hello world'\n self.assertEqual(\n text.encode('imap4-utf-7', 'strict'),\n text.encode('imap4-utf-7'))", "def encoding(self) -> 'layout.Encoding':", "def encoderRouter(self, encoder):\n pass", "def test_encode():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(loads(codec.encode(data=\"data\", media_type=DerivedSchema.MEDIA_TYPE)), is_(equal_to({\n \"data\": \"data\",\n \"mediaType\": DerivedSchema.MEDIA_TYPE,\n })))", "def base_encoder(cls, data, init_encoder, downsize_encoder, input_encoder):\n #todo: maybe do positional encoding before passing to init_encoder\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data)", "def encode(self, input_):\n return self.encoder(input_)", "def doEncode(self):\n raise CipherError(\"override this func and return the encoded msg\")", "def test_fail():\n enig = Enigma()\n str1 = \"Hellow\"\n str2 = \"Potato\"\n en1 = enig.encode(str1)\n en2 = enig.encode(str2)\n de1 = enig.decode(en1)\n de2 = enig.decode(en2)\n\n assert_not_equal(str1, de1)\n assert_not_equal(str2, de2)", "def build_encoder(shift):\n ### TODO.\n while True:\n if shift >= 0 and shift < 27 :\n break\n else:\n print \"That is not a valid input.\"\n print\n final_dict = build_coder(shift)\n return final_dict", "def encode(self) -> bytes:\n\n raise NotImplemented()", "def fit_transform(self):\n if self.enc_types == \"label\":\n return self._label_encoding()\n elif self.enc_types == \"ohe\":\n return self._one_hot_encoder()\n elif self.enc_types == \"binary\":\n return self._binarization()\n else:\n raise Exception(\"Encoding type not understood\")", "def encode(self, formatted, unformatted, formatted_ex=None):\n return self.encoder(formatted, unformatted, formatted_ex)", "def encode(self, value):\r\n pass", "def testEncodeDecode(self):\n test_cases = [\n '\\n',\n ' ',\n 'aaabbb',\n 'a b',\n '\\n\\n\\n',\n '122333',\n 'aaaaaaaaaa',\n 'aaaaaaaaaabbbbbbbbbbb',\n 'a'*1001,\n ''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]),\n ]\n for data in test_cases:\n encoded_result = ASCIITransportFormat.encode_data(data)\n decoded_result = ASCIITransportFormat.decode_data(encoded_result)\n self.assertEqual(decoded_result, data)", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def encode(self, value):\n raise NotImplementedError()", "def test_encode_missing_field():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(calling(codec.encode), raises(ValidationError))", "def test_encode_data(self):\n if self._cls == 'MetaschemaType':\n for x in self._valid_decoded:\n self.assert_raises(NotImplementedError, self.import_cls.encode_type, x)\n self.assert_raises(NotImplementedError, self.import_cls.encode_data,\n x, self.typedef)\n self.assert_raises(NotImplementedError, self.import_cls.decode_data, None,\n self.typedef)\n else:\n for x in self._valid_decoded:\n y = 
self.import_cls.encode_type(x, **self._encode_type_kwargs)\n z = self.import_cls.encode_data(x, y, **self._encode_data_kwargs)\n self.import_cls.encode_data_readable(x, None)\n self.import_cls.encode_data_readable(x, y)\n x2 = self.import_cls.decode_data(z, y)\n self.assert_result_equal(x2, x)\n if self._cls not in ['JSONNullMetaschemaType', 'AnyMetaschemaType']:\n self.assert_raises(MetaschemaTypeError,\n self.import_cls.encode_type, None)", "def _encode(self, value):\n if value is None:\n return value\n if isinstance(value, six.binary_type):\n return value\n return value.encode(\"utf-8\")", "def encoderRouterContent(self, contentType, encoder):\n pass", "def test_state(self):\n self.encoder = FileEncoder(**self.default_kwargs)\n self.assertEqual(self.encoder.state, EncoderState.UNINITIALIZED)", "def encode(self):\n if self.ciphered:\n raise CipherError(\"already encoded.\")\n try:\n self.result = self.doEncode(self.msg,self.shift)\n except Exception as e:\n raise CipherError(\"encoding failure: {}.\".format(e))\n self.ciphered = True\n return self.result", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def encode(self, state):\n raise NotImplementedError", "def encode_any(value: object) -> bytes:\n raise NotImplementedError()", "def test_main():\n\n encoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n decoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n retc = cb.mole.test.test_encoder(encoder, decoder=decoder)\n\n if retc == 0:\n print \"NO FAILURES / INCONCLUSIVE\"\n return retc", "def test_state(self):\n self.assertEqual(self.encoder.state, EncoderState.UNINITIALIZED)\n self.assertEqual(str(self.encoder.state), 'FLAC__STREAM_ENCODER_UNINITIALIZED')", "def encode_result(value: object) -> bytes:\n raise NotImplementedError()", "def encode(self, seq):", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def _encode(self, data):\n raise NotImplementedError(\"_encode needs to be implemented in {} subclass\".format(type(self).__name__))", "def compare_output(self, input, output, expected):\n if type(input) == UnicodeType:\n input = input.encode('raw_unicode_escape')\n if type(output) == UnicodeType:\n output = output.encode('raw_unicode_escape')\n if type(expected) == UnicodeType:\n expected = expected.encode('raw_unicode_escape')\n try:\n self.assertEquals('\\n' + output, '\\n' + expected)\n except AssertionError:\n print >>sys.stderr, '\\n%s\\ninput:' % (self,)\n print >>sys.stderr, input\n print >>sys.stderr, '-: expected\\n+: output'\n print >>sys.stderr, ''.join(self.compare(expected.splitlines(1),\n output.splitlines(1)))\n raise", "def test_decode(self):\n pass # TODO(tlarsen)", "def encode(self, *stuff):\n if self._kv_fmt:\n result = self._encode_wire(stuff[0])\n else:\n result = self._encode_wire(stuff)\n return result.getvalue()", "def test_consistent_encoding_128(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(\n CityHash128WithSeed(text), CityHash128WithSeed(text.encode(\"utf-8\"))\n )", "def encode(input):\n return ModelEncoder().encode(input)", "def test_encode_decode():\n enig = Enigma(534, 10, 5, [3, 1, 3, 4, 1, 0, 2, 3, 4, 2])\n string = \"\"\"A semi-enigma-machine-like cypher that uses 'rotors' to cypher\nand decypher messages. 
It is more unlike the enigma machine than\nit is alike, the wheelsspin one way to encode, the other to decode,\nwhereas the enigma machine always went forward, and paired keys.\nThis doesn't pair keys, nor does the cypher travel throught the rotors\nand back after hitting a reflector. The wheel positions are setteable,\nand as many wheels as you want can be added for extra 'cypherage'. This\nalso lacks the letter pairings, instead using a seed for random numbers\nused to shift the charactersets passed through.\"\"\"\n newstr = enig.encode(string)\n assert_equal(string, enig.decode(newstr))", "def encode(self):\n self.preprocess_msg()\n self._find_e()\n\n self.__encoded_msg = self._calculate(self.e)", "def encode(self,b):\n raise NotImplementedError('subclasses must override encode()!')", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def _encode_structure(self):\n pass", "def _encode_structure(self):\n pass", "def check_enc_dec(self, obj,\n # total length of encoded object\n length=None,\n # total length is at least the given number of bytes\n length_greater_or_equal=False,\n # approximate comparison (e.g. for float)\n approximate=False,\n # type marker expected at start of encoded output\n expected_type=None,\n # decoder params\n object_hook=None,\n object_pairs_hook=None,\n # additional arguments to pass to encoder\n **kwargs):\n encoded = self.ubjdumpb(obj, **kwargs)\n if expected_type is not None:\n self.type_check(encoded[0], expected_type)\n if length is not None:\n assert_func = self.assertGreaterEqual if length_greater_or_equal else self.assertEqual\n assert_func(len(encoded), length, self.__format_in_out(obj, encoded))\n if approximate:\n self.assertTrue(self.numbers_close(self.ubjloadb(encoded, object_hook=object_hook,\n object_pairs_hook=object_pairs_hook), obj),\n msg=self.__format_in_out(obj, encoded))\n else:\n self.assertEqual(self.ubjloadb(encoded, object_hook=object_hook,\n object_pairs_hook=object_pairs_hook), obj,\n self.__format_in_out(obj, encoded))", "def build_encoder(shift):\n ### TODO.", "def encode(frame, ovstream, output):\n try:\n pkt = ovstream.encode(frame)\n except Exception as err:\n print(\"encoding failed{0}\".format(err))\n\n if pkt is not None:\n try:\n output.mux(pkt)\n except Exception:\n print('mux failed: ' + str(pkt))\n return True", "def compute_encoding(self):\n for input_quantizer in self._input_quantizers.values():\n input_quantizer.compute_encoding()\n\n for quantizer in self.param_quantizers.values():\n # NOTE: If quantizer.enabled is True but quantizer.encoding is None,\n # quantizer.compute_encoding() will set quantizer.enabled to False.\n # Otherwise, quantizer.compute_encodings() is equivalent to no-op.\n quantizer.compute_encoding()\n\n for output_quantizer in self._output_quantizers.values():\n output_quantizer.compute_encoding()", "def test_wrong_argument_for_encoding(self):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.encode(4.5, 10)", "def test_ooc_encoder(self):\r\n details = {\r\n 'number': 1,\r\n 'string': 'string',\r\n 'datetime': datetime.datetime.now(UTC())\r\n }\r\n jsondetails = json.dumps(details, cls=CourseSettingsEncoder)\r\n jsondetails = json.loads(jsondetails)\r\n\r\n self.assertEquals(1, jsondetails['number'])\r\n self.assertEqual(jsondetails['string'], 
'string')", "def test__encode(mock__get_steganogan):\n\n # setup\n steganogan = MagicMock()\n mock__get_steganogan.return_value = steganogan\n\n params = MagicMock(\n cpu=True,\n architecture='basic',\n verbose=True,\n cover='image.jpg',\n output='output.png',\n message='Hello world'\n )\n\n # run\n cli._encode(params)\n\n # assert\n mock__get_steganogan.assert_called_once_with(params)\n steganogan.encode.assert_called_once_with('image.jpg', 'output.png', 'Hello world')", "def test_invalid_process(self):\n with self.assertRaises(TypeError):\n self.encoder.process([1, 2, 3, 4])", "def encode_all(self, inputs, encoder):\n input_shape = tf.shape(inputs)\n num_timesteps, batch_size = input_shape[0], input_shape[1]\n reshaped_inputs = tf.reshape(inputs, [-1, inputs.shape[-1]])\n inputs_encoded = encoder(reshaped_inputs)\n inputs_encoded = tf.reshape(inputs_encoded, [num_timesteps, batch_size, encoder.output_size])\n return inputs_encoded", "def runit():\n print encode(\"no\")\n\n print encode(\"yes\")\n\n print encode(\"OMG\")\n\n print encode(\"O M G\")\n\n print encode(\"mindblowingly\")\n\n print encode(\"Testing, 1 2 3, testing.\")\n\n print encode(\"Truth is fiction.\")\n\n print encode(\"The quick brown fox jumps over the lazy dog.\")\n\n print decode(\"vcvix rhn\")\n\n print decode(\"zmlyh gzxov rhlug vmzhg vkkrm thglm v\")", "def test_bert_embedder_base_type(self, setup_bert_embedder):\n bert_embedder, iter_dict = setup_bert_embedder\n encoding = bert_embedder(iter_dict)\n assert encoding.size() == (2, 8, 768)", "def test_compression_level(self):\n test_compression_level = 8\n self.encoder._compression_level = test_compression_level", "def _test_encoder_fn(top_level_encoder):\n if top_level_encoder == 'simple':\n encoder_constructor = te.encoders.as_simple_encoder\n elif top_level_encoder == 'gather':\n encoder_constructor = te.encoders.as_gather_encoder\n else:\n raise ValueError('Unknown top_level_encoder.')\n\n identity_encoder = te.encoders.identity()\n test_encoder = te.core.EncoderComposer(\n te.testing.PlusOneOverNEncodingStage()).make()\n\n def encoder_fn(tensor):\n if np.prod(tensor.shape) > 1:\n encoder = encoder_constructor(test_encoder,\n tf.TensorSpec(tensor.shape, tensor.dtype))\n else:\n encoder = encoder_constructor(identity_encoder,\n tf.TensorSpec(tensor.shape, tensor.dtype))\n return encoder\n\n return encoder_fn", "def encode(self, out, val, type_name):\n\n if isinstance(val, UnknownData):\n # it was a blob of bytes because of a decoding problem;\n # just write the whole thing out\n out.write(val)\n return\n parse_tree = Serialization._parse_type(type_name)\n try:\n self._encode_tree(out, val, parse_tree)\n except UnknownCodecError as e:\n # rethrow UnknownCodecError, because we were supposed to catch it\n # via UnknownData. 
This means the user manually wrote a bad type.\n raise EncodeError(\"unknown codec: %s\" % e.name)", "def encode(self, image) -> bytes:\n raise NotImplementedError()", "async def test_setup_encoding(hass: HomeAssistant) -> None:\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n stream=httpx.ByteStream(\"tack själv\".encode(encoding=\"iso-8859-1\")),\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"name\": \"mysensor\",\n \"encoding\": \"iso-8859-1\",\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n assert hass.states.get(\"sensor.mysensor\").state == \"tack själv\"", "def test_encoding(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEncoding(name)\n self.assertEqual(name, self.colorspace.getEncoding())", "def test_bug_CategoryEncoder():\n if category_encoders is None:\n return # This will show as 'expected to fail but pass' if package isn't installed\n # If the bug is corrected I'll know it\n for klass in (\n category_encoders.HelmertEncoder,\n category_encoders.PolynomialEncoder,\n category_encoders.PolynomialEncoder,\n category_encoders.SumEncoder,\n ):\n\n df = pd.DataFrame({\"cat_col\": (np.array([\"a\", \"b\", \"c\", \"d\"]))[np.random.randint(0, 4, 100)]})\n enc = klass()\n df_enc = enc.fit_transform(df)\n\n df2 = df.head().copy()\n df2.loc[df2[\"cat_col\"] == \"d\", \"cat_col\"] = \"a\"\n df2_enc = enc.transform(df2)\n\n assert df_enc.shape[1] == df2_enc.shape[1]", "def is_encoded(self,text):\n \n try:\n str(text)\n except:\n return False\n else:\n return True", "def test_empty_key_string(self):\n def x():\n y = pyamf.MixedArray()\n y.update({'': 1, 0: 1})\n self.encode(y)\n\n self.assertRaises(pyamf.EncodeError, x)" ]
[ "0.7567242", "0.70699394", "0.69895875", "0.69377136", "0.68090206", "0.6634779", "0.65108114", "0.64991826", "0.64129996", "0.6411713", "0.64028895", "0.63984114", "0.63530314", "0.6349346", "0.6330712", "0.6303407", "0.6277781", "0.6252162", "0.6197097", "0.61829704", "0.6155082", "0.6114135", "0.6110147", "0.6098487", "0.6073241", "0.60635626", "0.60614806", "0.6048583", "0.6029933", "0.6006233", "0.5959423", "0.59533286", "0.5943206", "0.59322137", "0.59268016", "0.5905825", "0.590407", "0.58613414", "0.5860509", "0.5851864", "0.58440876", "0.5816427", "0.57919353", "0.5778777", "0.5772667", "0.5732265", "0.5686413", "0.56622404", "0.5659802", "0.56594795", "0.56471187", "0.5625947", "0.56231076", "0.56055075", "0.56051856", "0.56037134", "0.558169", "0.5572852", "0.5565216", "0.5563031", "0.55510575", "0.55447775", "0.55413204", "0.55275565", "0.55270106", "0.55235285", "0.5516712", "0.5494522", "0.54903895", "0.54885226", "0.54864585", "0.54761976", "0.54640007", "0.54488134", "0.54257315", "0.5420096", "0.54112554", "0.54033667", "0.5396035", "0.5396035", "0.5395829", "0.539577", "0.5376314", "0.5372901", "0.5370208", "0.5368553", "0.5340265", "0.53387946", "0.53374356", "0.5336674", "0.5334857", "0.53070235", "0.5304793", "0.5302802", "0.5284951", "0.5284418", "0.5273793", "0.5271709", "0.5256683", "0.5251215" ]
0.6834381
4
Ensure decoder correctly decodes a known encoded number.
def test_decoder(self):
    from sosbeacon.utils import number_decode

    encoded = 'b6'
    number = number_decode(encoded)
    self.assertEqual(number, 123)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode(self, number: int) -> typing.Union[int, str]:\n return number", "def test_decode_numbers(self, number, base, expected):\n self.assertEqual(positional.decode(number, base), expected)", "def test_decoding_non_str_fails(self):\n self.assertRaises(DecodingError, base62.to_decimal, sys.maxsize)", "def decode(self, encoded: str):\n if not isinstance(encoded, str) or not encoded:\n return None\n int_encoded = self._decode_str(encoded)\n if int_encoded is None:\n return None\n int_origin = self._int_obfuscator.decode(int_encoded)\n if int_origin is None:\n return None\n str_encoded = self.__encode(int_origin)\n return int_origin if str_encoded == encoded else None", "def _decode_int(data: BencodedString) -> int:\n data.del_prefix(1)\n end_marker_index = data.bytes.find(END_MARKER)\n\n if end_marker_index > 0:\n result_bytes = data.get_prefix(end_marker_index)\n data.del_prefix(end_marker_index + 1)\n else:\n raise ValueError(\n \"Cannot decode an integer, reached the end of the bencoded \"\n \"string before the end marker was found. Most likely the \"\n \"bencoded string is incomplete or incorrect.\"\n )\n\n return int(result_bytes.decode(\"ascii\"))", "def test_decode(self):\n assert url_encoder.decode('TheStakeOut') == 1\n assert url_encoder.decode('TheStockTip-TheSeven') == 800\n assert url_encoder.decode('MaleUnbonding-TheConversion-TheAndreaDoria') == 99999", "def decode(data): #@NoSelf", "def decode(self, value):\r\n pass", "def decode(self, encoded):", "def _decode(value):\n # TODO add support for strings\n return struct.unpack('<i', value)[0]", "def test_decode(self):\n pass # TODO(tlarsen)", "def decode(data):\n raise NotImplementedError", "def _decode_integer(bytes_data): # type: (bytes) -> int\n values = [ord(b) for b in bytes_data]\n # check if the integer is normalized\n if (\n len(values) > 1\n and (\n values[0] == 0xff\n and values[1] & 0x80\n or values[0] == 0x00\n and not (values[1] & 0x80)\n )):\n raise ASN1SyntaxError('ASN1 syntax error')\n negative = values[0] & 0x80\n if negative:\n # make positive by taking two's complement\n for i in range(len(values)):\n values[i] = 0xff - values[i]\n for i in range(len(values) - 1, -1, -1):\n values[i] += 1\n if values[i] <= 0xff:\n break\n\n values[i] = 0x00\n value = 0\n for val in values:\n value = (value << 8) | val\n if negative:\n value = -value\n try:\n value = value\n except OverflowError:\n pass\n return value", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_deserialize_number():\n bytestring = bytes([\n 0b_1010_0001,\n 0b_1100_1111,\n 0b_1000_0010,\n 0b_0100_0001\n ])\n assert 136357793 == UnsignedInt.read(bytestring)", "def decode(encoded: str) -> int:\n if len(encoded) > 6:\n raise ValueError(\"Parameter too long\")\n\n value: int = 0\n\n for c in encoded:\n value <<= 6\n value += Base64.ord(c)\n value = Base64._int_overflow(value)\n\n return value", "def decode(self, value):\r\n return value", "def test_decode(self):\n self.assertEqual(\n hex_to_b64(self.hex_string),\n self.expect_result\n )", "def decode_varint(value):\n return decode_varint_stream(value).next()", "def decode(b):\n\n if b.startswith(\"0z\"):\n b = b[2:]\n\n l, i, v = len(b), 0, 0\n for x in b:\n v += _value(x) * (BASE ** (l - (i + 1)))\n i += 1\n\n return v", "def _decode(data: BencodedString) -> Union[bytes, dict, int, list]:\n if not data.bytes:\n raise ValueError(\"Cannot decode an empty bencoded string.\")\n\n if data.bytes[0] == START_DICT:\n return _decode_dict(data)\n\n if 
data.bytes[0] == START_LIST:\n return _decode_list(data)\n\n if data.bytes[0] == START_INTEGER:\n return _decode_int(data)\n\n if chr(data.bytes[0]).isdigit():\n return _decode_bytes(data)\n\n raise ValueError(\n \"Cannot decode data, expected the first byte to be one of \"\n f\"'d', 'i', 'l' or a digit, got {chr(data.bytes[0])!r} instead.\"\n )", "def decode(a):\n return decode(a)", "def test_inverse(self):\n from sosbeacon.utils import number_decode\n from sosbeacon.utils import number_encode\n\n for number in range(0, 500000, 339):\n encoded = number_encode(number)\n decoded = number_decode(encoded)\n self.assertEqual(number, decoded)", "def decode_unit(as_bytes: typing.List[int]) -> None:\n raise NotImplementedError()", "def test_ulong_long_int_decode(self):\n self.failUnlessEqual(self.readFunc('decode_longlong', self.const_integer_long_long_encoded), self.const_integer, 'unsigned long long decoding FAILED...')", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def decode_string(self, value):\r\n return value", "def _dinamic_decode(self):\n raise NotImplementedError", "def _decode_int(f):\n\tassert f.read(1) == _TYPE_INT\n\treturn int(_readuntil(f))", "def decode_int(n):\n return stuct.unpack(\">I\", n)[0]", "def _ensure_number(value):\n assert isinstance(value, (bytes, float, int)), \"value has to be either bytes or float or int\"\n\n return int.from_bytes(value, byteorder=\"big\") if type(value) is bytes else value", "def decode_result(as_bytes: typing.List[int]):\n raise NotImplementedError()", "def decode(self, s):", "def decode(self, s):", "def decode(self, x):\n return x", "def decode(self, code):\n raise NotImplementedError", "def test_decodeWithErrors(self):\n bytes = b'Hello world'\n self.assertEqual(\n bytes.decode('imap4-utf-7', 'strict'),\n bytes.decode('imap4-utf-7'))", "def testDecode(self):\n test_cases = [\n ('1\\n', '\\n'),\n ('1 ', ' '),\n ('3a 3b', 'aaabbb'),\n ('1a 1 1b', 'a b'),\n ('3\\n', '\\n\\n\\n'),\n ('11 22 33', '122333'),\n ('10a', 'aaaaaaaaaa'),\n ('10a 11b', 'aaaaaaaaaabbbbbbbbbbb'),\n ('1001a', 'a'*1001),\n ('1001a 909b 65c 2d', ''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2])),\n ]\n for data, expected in test_cases:\n decoded_result = ASCIITransportFormat.decode_data(data)\n self.assertEqual(decoded_result, expected)", "def decode(val):\n if isinstance(val, Decimal):\n return float(val)\n return val", "def Decodingfunc(Codebyte):\r\n Decodedint=struct.unpack('b',Codebyte)[0]\r\n N=0 #number of repetitions\r\n L=0 # length of single/multiple sequence\r\n if Decodedint >= 0: #single\r\n N = 1\r\n L = Decodedint+1\r\n else: #multiple\r\n L = -Decodedint//16+1\r\n N = -Decodedint-(L-1)*16+1\r\n #print(\"N =\",N,\" L =\",L)\r\n return (N,L)", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_decodeWithoutFinalASCIIShift(self):\n self.assertEqual(\n b'&AL0'.decode('imap4-utf-7'),\n u\"\\N{VULGAR FRACTION ONE HALF}\",\n )", "def IntDecode(int_bytes: bytes) -> int:\n return ed25519_lib.int_decode(int_bytes)", "def test_encoding_non_int_fails(self):\n self.assertRaises(EncodingError, base62.from_decimal, string.ascii_letters)", "def decode(self, z):\n raise NotImplementedError", "def decode(self, encoded_value):\n return float(encoded_value) / (1 << self.frac_bits)", "def decode_network_number(ptype, plen, buf):\n return number.unpack_from(buf, header.size)[0]", "def resolve(self):\n 
# Get the binary representation as a string without the prefix 0b\n bin_str = bin(self.__number_to_reverse)[2:]\n # Reverse the string and convert it to integer\n return int(bin_str[::-1], 2)", "def test_check_wrong_argument_type(self, number, base):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.decode(number, base)", "def test_numbers_roundtrip():\n for num in (0, 1, 2, 178, 300, BIG_NUMBER):\n num2 = UnsignedInt.read(UnsignedInt.to_bytes(num))\n assert num2 == num", "def test_decode():\n decoding = d.decode()\n assert type(decoding) == list\n assert len(decoding) == 7\n assert decoding[0] == '-12;-1\\n\\nESS'\n assert decoding[-1] == '2;-2\\n\\nWSWESNESSS'\n for x in decoding:\n assert \"\\n\" in x", "def _decode_index_value(self, index, value):\n if index.endswith(\"_int\"):\n return int(value)\n else:\n return value", "def test_json_decode_error(self):\n with self.assertRaises(json.JSONDecodeError):\n int_t.Ind.from_json('')\n with self.assertRaises(ValueError):\n int_t.Ind.from_json('{}')\n with self.assertRaises(ValueError):\n int_t.Ind.from_json('[{}]')", "def test_decode_errors(self):\n if self._invalid_encoded:\n self.assert_raises((ValueError, jsonschema.exceptions.ValidationError),\n self.import_cls.decode,\n self._invalid_encoded[0], self.typedef)", "def decode_from_value(byte_iter):\n value_length = wsp_pdu.Decoder.decode_value_length(byte_iter)\n # See what token we have\n byte = byte_iter.next()\n if byte == 129: # Insert-address-token\n return '<not inserted>'\n\n return MMSDecoder.decode_encoded_string_value(byte_iter)", "def decode(variable):\n try:\n if sys.version_info[1] > 5:\n return json.loads(base64.b64decode(variable))\n else:\n return json.loads(base64.b64decode(variable).decode('utf-8'))\n except json.decoder.JSONDecodeError:\n print('Error decoding JSON, code %d', json.decoder.JSONDecodeError)", "def decode_signed_value( name, value ):", "def test_wrong_twos_to_int(self):\n self.assertNotEqual(utils.twos_to_int('11111011'), 251)", "def decode_exponent(e: int) -> int:\n assert 0 <= e <= MAX_EXPONENT\n return ENCODING_TABLE[e]", "def test_decode_invalid_B_record():\n\n invalid_b_records = [\n 'B1053175438931N0ÿÿÿøÈÐÀÀÜÐá\u0015\u0004ÀÄÈàÔÀÄÈÌØÀÀÜÀÀ',\n 'BÿÿÿøÄÀÈÌÄàÐäÐàààÁ8ÀÄÔÀäÈÌå��ÀÄàÔäÀ',\n 'B1140ÿÿÿøÌÈÔÐÌÌààÑ8ÀÈÐÈÌàÌÕ\u0015\u0004ÀÀääÈÀÀäÔ',\n 'B1309044931600N0153ÿÿÿøÐÀÄÍ\u0015\u0004ÀÄÔÌØÀÄÔÜØÀÀäÀ',\n 'B10470349ÿÿÿøÌÔäØÕ8ÀÄÔÄÈàÜÙ\u0015\u0004ÀÄàÐÐÀÄäÀÜÀÀØÀ',\n 'B11052249474ÿÿÿøÀÉ8ÀÄÔÀÜÜäÕ\u0015\u0004ÀÄÌÐÌÀÄÐÀÈÀÀÔÀ',\n 'B12ÿÿÿøÐØÀÌÐäÐÈØäÝ8ÀÄÔÄÜÌÐÑ\u0015\u0004ÀÄØÐàÀÄÜÐÀÀÀÜÀÀÀ4)\bÄÈ',\n 'B1124185148269N9833N00553309EA0084800873000068000000',\n 'B1245085122369N00614242Eÿÿÿù\u0004ÀÄÜØÄÀÄàÐäÀÀØÀ',\n ]\n\n for b_record in invalid_b_records:\n with pytest.raises(ValueError):\n LowLevelReader.decode_B_record(b_record)", "def decode(self, data: bytes) -> bytes:\n ...", "def _decode_35699(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29981:\n start_byte += n_bytes\n n_bytes = 16\n min_absolute, max_absolute = struct.unpack(\n '<dd', data[start_byte:start_byte + n_bytes])\n return {'min_absolute': min_absolute,\n 'max_absolute': max_absolute}", "def decode_u32(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, False)", "def decode(variable):\n\n try:\n if sys.version_info[1] > 5:\n return json.loads(base64.b64decode(variable))\n else:\n return json.loads(base64.b64decode(variable).decode('utf-8'))\n except json.decoder.JSONDecodeError:\n print('Error 
decoding JSON, code %d', json.decoder.JSONDecodeError)", "def decode(self): # pragma: no cover\n pass", "def handle_decode(self, encoded_data):\n \n config.COD_PROMPT = config.DEC_PROMPT\n print config.DEC_PROMPT + \" decoding...\"\n \n # while there is another decoder, run each item through the next decoder\n data = encoded_data\n success = False\n for decoder in self.decoder_list:\n current_decoder = decoder()\n success, data = self.recursive_decoder(current_decoder.decode, data)\n if not success:\n break\n print config.DEC_PROMPT + \"%s decoded to '%s'\" % ( current_decoder.name(),data)\n return success, data", "def test_decode(self):\r\n barcodes = ['AGCACGAGCCTA',\r\n 'AACTCGTCGATG',\r\n 'ACAGACCACTCA',\r\n 'ACCAGCGACTAG',\r\n 'AGCAGCACTTGT',\r\n 'AACTGTGCGTAC',\r\n 'ACAGAGTCGGCT',\r\n 'ACCGCAGAGTCA',\r\n 'ACGGTGAGTGTC', ]\r\n for bc in barcodes:\r\n self.assertEqual(golay.decode(bc), (bc, 0))\r\n for bc in barcodes:\r\n err_bc = 'C' + bc[1:]\r\n self.assertEqual(golay.decode(err_bc), (bc, 2))", "def numDecodings(self, s):\n if not s or s[0] == '0':return 0\n s1,s2 = 1,1\n for m in xrange(1,len(s)):\n if s[m] == '0':s2 = 0\n if s[m-1] == '1' or (s[m-1] == '2' and s[m] <= '6'):\n s2 += s1\n s1 = s2 - s1\n else:\n s1 = s2\n if s2 == 0:return 0\n return s2", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)", "def decode_dict(state):\n new_state = dict()\n for k, v in state.items():\n if v.decode().isnumeric():\n new_state[k.decode()] = int(v)\n else:\n new_state[k.decode()] = v.decode()\n return new_state", "def decode_any(as_bytes: typing.List[int]) -> object:\n raise NotImplementedError()", "def test_decode():", "def test_encoder(self):\n from sosbeacon.utils import number_encode\n\n number = 123\n encoded = number_encode(number)\n self.assertEqual(encoded, 'b6')", "def test_decode_barcode_8_ok(self):\r\n self.assertEqual(decode_barcode_8(self.valid_bc_1),\r\n (self.valid_bc_1, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_2),\r\n (self.valid_bc_2, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_3),\r\n (self.valid_bc_3, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_4),\r\n (self.valid_bc_4, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_5),\r\n (self.valid_bc_5, 0))", "def decode_position(s):\n if '=' in s:\n raise TypeError\n while True:\n try:\n bin = standard_b64decode(s)\n except TypeError, e:\n if str(e) != 'Incorrect padding':\n raise\n s += '='\n else:\n break\n r = twoside_decode(bin)\n if not len(r) == 2 or not len(r[0])==25 or not len(r[1])==25:\n raise bglib.encoding.DecodeError('got bad data: %s '%(s,))\n return r", "def test_parser_raises_decode_error(self):\n with self.assertRaises(ParseError):\n self.parser.parse(\n stream=BytesIO(b'{\"value\": NaN}'),\n media_type=\"application/json\",\n parser_context={},\n )", "def test_illgal_character(self):\n self.assertRaises(DecodingError, base62.to_decimal, '@@@@')", "def decode(decode_format):\n return output_from_decode", "def decode(codes,magic):\n\n def compact_decode2(codes,magic,uniqa,uniqb):\n \"\"\"Decodes a single integer value into the original 2 values.\n\n This is the inverse operation of compact_encode2.\n Thus compact_decode2(*compact_encode(data)) will return data.\n\n codes can be a subset of the encoded values, but the other 3 arguments\n should be exactly those from the compact_encode2 result.\n \"\"\"\n # decoding returns the indices into the uniq numberings\n pos = 
demagic2(codes,magic)\n return column_stack([uniqa[pos[:,0]],uniqb[pos[:,1]]])\n \n data = []\n for mag in magic:\n cols = compact_decode2(codes,mag[0],mag[1],mag[2])\n data.insert(0,cols[:,1])\n codes = cols[:,0]\n data.insert(0,codes)\n return Connectivity(column_stack(data))", "def _decode_octet_string(bytes_data): # type: (bytes) -> bytes\n return bytes_data", "def test_right_bytes_to_int(self):\n byte_string = b'\\x00\\x00\\xFA\\xFF'\n result = utils.bytes_to_int(byte_string, little_endian=False)\n expected_result = 64255\n self.assertEqual(result, expected_result)", "def test_unit_id_decode(self):\n pcb_revision = 0xAA\n unit_type = 0xCC\n decode = MidniteClassicModbusRegisters.UNIT_ID['decode']\n registers = []\n registers.append((pcb_revision << 8) | unit_type)\n expected = {\n 'pcb_revision': pcb_revision,\n 'unit_type': unit_type\n }\n self.assertDictEqual(expected, decode(registers))\n registers = ['a']\n self.assertRaises(TypeError, decode, registers)\n registers = []\n self.assertRaises(IndexError, decode, registers)", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data", "def _decode(self, input_dict):\n pass", "def decode_u64(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, False)", "def unpickle(value):\r\n try:\r\n value = int(value)\r\n except (ValueError, TypeError):\r\n value = smart_bytes(value)\r\n value = pickle.loads(value)\r\n return value", "def parseNum(num):\n num = str(num).strip()\n base = 10\n if (num[0] == '0') & (len(num) > 1):\n if num[1] == 'x':\n base = 16\n elif num[1] == 'b':\n base = 2\n else:\n base = 8\n return int(num, base)", "def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify):\n return _int(_hexlify(value), 16)", "def decode_i32(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, True)", "def mostlikelydecode(self):\n\n # Add your code here\n most_likely_codeword = Cipher(None) # Replace None with a method\n return most_likely_codeword.decode(None) # Replace None. What does decode take again in the Cipher class? 
", "def decode_u8(as_bytes: typing.List[int]) -> int:\n return le_bytes_to_int(as_bytes, False)", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def decode_cl_value(as_bytes: typing.List[int]) -> CLValue:\n raise NotImplementedError()", "def decode(str):\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)", "def decode(self):\n self.decoded_content = []\n if self.encoded_content:\n s = self.encoded_content\n if self.encoding:\n if self.encoding.lower() == u'base64':\n s = decode_base64(s)\n elif self.encoding.lower() == u'csv':\n list_of_lines = s.split()\n for line in list_of_lines:\n self.decoded_content.extend(line.split(','))\n self.decoded_content = map(int, [val for val in self.decoded_content if val])\n s = \"\"\n else:\n raise Exception(u'unknown data encoding %s' % (self.encoding))\n else:\n # in the case of xml the encoded_content already contains a list of integers\n self.decoded_content = map(int, self.encoded_content)\n s = \"\"\n if self.compression:\n if self.compression == u'gzip':\n s = decompress_gzip(s)\n elif self.compression == u'zlib':\n s = decompress_zlib(s)\n else:\n raise Exception(u'unknown data compression %s' %(self.compression))\n else:\n raise Exception(u'no encoded content to decode')\n for idx in xrange(0, len(s), 4):\n val = ord(str(s[idx])) | (ord(str(s[idx + 1])) << 8) | \\\n (ord(str(s[idx + 2])) << 16) | (ord(str(s[idx + 3])) << 24)\n self.decoded_content.append(val)\n #print len(self.decoded_content)\n # generate the 2D version\n self._gen_2D()", "def decode(eVal):\n return pickle.loads(zlib.decompress(base64.b64decode(eVal)))", "def decode(self,m):\n raise NotImplementedError('subclasses must override decode()!')", "def test_decode_pair():\n\tassert decode_pair(0) == (0, 0)\n\tassert decode_pair(1) == (1, 0)\n\tassert decode_pair(2) == (0, 1)\n\tassert decode_pair(207) == (4, 6)", "def decode(x):\n\n try:\n return str(unichr(x).encode('ascii', 'replace')) # Make sure data is encoded properly\n except ValueError as err:\n print err\n print \"** ERROR - Decoded character is unrecognized **\"", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')" ]
[ "0.7170725", "0.70083755", "0.66928744", "0.65111727", "0.646559", "0.6452943", "0.63323385", "0.63235164", "0.62588257", "0.6219759", "0.61692274", "0.61123574", "0.61047447", "0.6043373", "0.6040491", "0.5980549", "0.5975069", "0.59651697", "0.59623265", "0.5945968", "0.5913425", "0.59116644", "0.58524215", "0.58500725", "0.5846071", "0.58217824", "0.58111316", "0.58048606", "0.5797216", "0.5790242", "0.5761629", "0.5745151", "0.5725149", "0.5725149", "0.57188225", "0.570648", "0.5703216", "0.56929064", "0.56758296", "0.56727976", "0.56624806", "0.56536245", "0.5641026", "0.56272244", "0.56258696", "0.56139183", "0.56078947", "0.5605265", "0.5595154", "0.5594008", "0.5576637", "0.55501807", "0.5516001", "0.55033493", "0.55012506", "0.5494776", "0.5492495", "0.54914105", "0.5455994", "0.5452447", "0.54482764", "0.5441561", "0.5439026", "0.54271597", "0.54236674", "0.5415542", "0.54139364", "0.5412837", "0.5402405", "0.53931147", "0.53882396", "0.5380502", "0.5366649", "0.53651357", "0.5357312", "0.5351766", "0.53488636", "0.53476465", "0.53342503", "0.5331013", "0.53136027", "0.53052515", "0.53002906", "0.5297859", "0.5296411", "0.52951694", "0.52744657", "0.5274151", "0.5262703", "0.52469856", "0.52465063", "0.5240757", "0.52376926", "0.5237607", "0.5235483", "0.52345115", "0.5231077", "0.5219934", "0.5218103", "0.5214742" ]
0.78923905
0
Ensure decode(encode(number)) == number over a range of numbers.
def test_inverse(self):
    from sosbeacon.utils import number_decode
    from sosbeacon.utils import number_encode

    for number in range(0, 500000, 339):
        encoded = number_encode(number)
        decoded = number_decode(encoded)
        self.assertEqual(number, decoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def translate_num(number, lower_bound, upper_bound):\n try:\n value = int(number, 0)\n if value < lower_bound or value > upper_bound:\n raise translate_num_out_of_range(value, lower_bound, upper_bound)\n else:\n return value\n except:\n raise translate_num_error(number)", "def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number", "def test_octet_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_octet, 256)", "def test_numbers_roundtrip():\n for num in (0, 1, 2, 178, 300, BIG_NUMBER):\n num2 = UnsignedInt.read(UnsignedInt.to_bytes(num))\n assert num2 == num", "def test_decode_numbers(self, number, base, expected):\n self.assertEqual(positional.decode(number, base), expected)", "def test_out_of_range_int_to_twos(self):\n with self.assertRaises(ValueError):\n utils.int_to_twos(4294967295)", "def test_ulong_int_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_long, 4294967296)", "def test_uoctet_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_octet, -1)", "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def is_armstrong_number(number: int) -> bool:\n result = 0\n num_str = str(number)\n for i in num_str:\n result += int(i) ** len(num_str)\n return result == number", "def __verify_range(value, minimum, maximum):\n if value in range(minimum, maximum):\n return True\n else:\n return False", "def validate_n_digits_range(min_val: int, max_val: int, n: int = 4) -> bool:\n def func(s: str):\n return validate_n_digits(n)(s) and min_val <= int(s) <= max_val\n return func", "def check_range(num):\n for i in ranges.keys():\n if num in ranges[i]:\n return i", "def test_wrong_twos_to_int(self):\n self.assertNotEqual(utils.twos_to_int('11111011'), 251)", "def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0", "def n_sanity_check(number):\n #number = min(99,number)\n #number = max(1,number)\n #return number\n if number > 99: # This is alot clearer no?\n return 99\n elif number < 1:\n return 1\n else:\n return number", "def validate_low_integer(number):\n if number < 2:\n raise MaxLimitTooLowError()", "def NumberTest(num, lower, upper):\n\n\t# run the check\n\tif (num >= lower) & (num <= upper):\n\t\treturn True\n\treturn False", "def is_armstrong_number(number: int) -> bool:\n\n str_number = f\"{number}\"\n return sum(pow(int(x), len(str_number)) for x in str_number) == number", "def checkio(str_number, radix):\n list_str_num = list(str_number)\n power = 0\n result = 0\n for i in range(len(list_str_num)):\n str_num = list_str_num[len(list_str_num) -1 - i]\n if str_num.isdigit():\n num = int(str_num)\n else:\n num = (ord(str_num) - 55)\n if num >= radix:\n return -1\n result += num * (radix ** i)\n return result", "def check_number(self, number):\n return (not self.whitelist_numbers or\n number in self._number_whitelist)", "def check_if_armstrong_number(number):\n sum = 0\n number_as_string = str(number)\n digits_number = len(number_as_string)\n for character in 
number_as_string:\n sum += int(character) ** digits_number\n\n return sum == number", "def test_ulong_int_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_long, -1)", "def is_valid(n):\n\tif type(n) == int:\n\t\tn = str(n)\n\tfor index, c in enumerate(n):\n\t\tif index == 0:\n\t\t\tcontinue\n\t\tif n[index - 1] > n[index]:\n\t\t\treturn False\n\treturn True", "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def range_between_0_and_9(self, user_num):\r\n if 0 <= user_num < 9:\r\n return True\r\n else:\r\n return False", "def test_short_string_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_shortstr, 'x'*256)", "def validate(number):\n number = compact(number)\n if len(number) != 11:\n raise InvalidLength()\n if not isdigits(number):\n raise InvalidFormat()\n if number.startswith('0'):\n raise InvalidFormat()\n # In the first 10 digits exactly one digit must be repeated two or\n # three times and other digits can appear only once.\n counter = defaultdict(int)\n for n in number[:10]:\n counter[n] += 1\n counts = [c for c in counter.values() if c > 1]\n if len(counts) != 1 or counts[0] not in (2, 3):\n raise InvalidFormat()\n return mod_11_10.validate(number)", "def check_ranges(ranges, value):\n for fromto in ranges:\n start, end = fromto.split('-')\n if int(value) in range(int(start), int(end) + 1):\n return True\n # else:\n # print('%s is not between %s and %s' % (value, start, end))\n return False", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def test_ulong_long_int_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_longlong, 18446744073709551616)", "def is_number(c):\n return '0' <= c <= '9'", "def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 10:\n raise InvalidLength()\n if checksum(number) != 0:\n raise InvalidChecksum()\n return number", "def equal_neg_numbers(self):\n lst = [-98, -98, -98]\n self.assertEqual(max_integer(lst), -98)", "def match(self, digit: int) -> bool:\n return bool(self.range_from <= digit <= self.range_to)", "def test_ulong_long_int_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_longlong, -1)", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 
* int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def check_for_int(check):", "def check_correct_numbers(puzzle_size: int, puzzle_list: list) -> None:\n for number in range(puzzle_size * puzzle_size):\n if number not in puzzle_list:\n raise ParsingError(\"Puzzle does not contain expected numbers.\")", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def is_0to20(value):\n return 0 <= value <= 20", "def in_range(a :int, range :str):\n range = range.strip()\n left, right = range[0], range[-1]\n range = range[1:-1].split(',')\n \n brac = ['[', ']', '(', ')']\n assert left in brac and right in brac and len(range) == 2\n\n range = [int(i) for i in range]\n b, c = range\n\n a %= 1<<__m__\n b %= 1<<__m__\n c %= 1<<__m__\n\n if left == '[' and right == ']':\n return ( a>=b and a<=c ) if b <= c else ( a>=b or a<=c )\n if left == '[' and right == ')': \n return ( a>=b and a<c ) if b <= c else ( a>=b or a<c )\n if left == '(' and right == ')': \n return ( a>b and a<c ) if b <= c else ( a>b or a<c )\n if left == '(' and right == ']': \n return ( a>b and a<=c ) if b <= c else ( a>b or a<=c )", "def high_and_low_classic(numbers: str) -> str:\n min_num = max_num = int(numbers[0])\n for number in map(int, numbers.split()): # type: int\n if number < min_num:\n min_num = number\n if number > max_num:\n max_num = number\n return f'{min_num} {max_num}'", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def test_right_twos_to_int(self):\n self.assertEqual(utils.twos_to_int('101'.zfill(8)), 5)", "def equal_pos_numbers(self):\n lst = [98, 98, 98]\n self.assertEqual(max_integer(lst), 98)", "def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double", "def check_number(client, num, min, max):\r\n while True:\r\n try:\r\n # Convert it into integer\r\n temp = int(num)\r\n if temp >= min and temp <= max:\r\n break\r\n else:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num= client.recv(BUFSIZ)\r\n except ValueError:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num = client.recv(BUFSIZ) \r\n return temp", "def test_toint(number, expected, cond):\n assert toInt(number, cond=cond) == expected", "def checkRange(currentNumRange: tuple, currentLevel: int):\n\n\tlowerNumber, higherNumber = currentNumRange[0], currentNumRange[1]\n\tmid = (higherNumber + lowerNumber) // 2\n\tans = getAnswer(f\"Does your number is greater than {mid}?\", mid)\n\n\tif ans:\n\t\tlowerNumber = mid\n\telse:\n\t\thigherNumber = mid\n\n\n\treturn (lowerNumber, higherNumber)", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def validate_range_str(range_str):\n if not isinstance(range_str, str):\n return False\n ranges = range_str.split(\",\")\n assert len(ranges) > 0\n for r in ranges:\n # a range may be either e.g. 
'64', or '128-256'\n try:\n c = [int(x) for x in r.split(\":\")]\n except:\n return False\n # c should be either e.g. [ 128 ], or [64,128].\n if len(c) == 1:\n if c[0] <= 0:\n return False\n elif len(c) == 2:\n if c[0] <= 0 or c[1] < c[0]:\n return False\n else:\n return False\n return True", "def _is_in_range(valid_values):\n\n def f(x):\n if x not in valid_values:\n raise ValueError('{} not in {}'.format(x, valid_values))", "def _check_range(r): \n if _is_single_range(r):\n _check_one_range(r)\n elif isinstance(r, collections.Sequence):\n for r2 in r:\n _check_one_range(r2)\n else:\n raise error.RangeSyntaxError(str(r))", "def problem_52():\n\n for number in xrange(1, 123456789):\n sorted_num = ''.join(sorted(str(number)))\n if len([value for value in xrange(2, 7)\n if ''.join(sorted(str((value * number)))) == sorted_num]) == 5:\n return number", "def check(self, number: int) -> bool:\n return (number in self.numbers_set)", "def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity", "def test_task559_mersen_number(number, expected_value):\r\n assert algo.Task559.mersen_numbers(number) == expected_value", "def near_hundred(n):\n if 90 <= n <= 110 or 190 <= n <= 210:\n return True\n else:\n return False", "def is_number_palindrome(number, digits, start):\n number = str((number // 10**start) % 10**digits).zfill(digits)\n return is_palindrome(number)", "def high_and_low(numbers: str) -> str:\n return f'{min(map(int, numbers.split()))} {max(map(int, numbers.split()))}'", "def validate(val, num1=0, num2=float('inf')):\n val = int(val)\n if not num1 <= val < num2:\n raise ArgumentTypeError(\"Value out of range: {}. 
\"\n \"Should be between {} and {}.\".format(val, num1, num2 - 1))\n return val", "def is_valid_birth_number(birth_number: int):\n if birth_number in range(1, 1000):\n return True\n return False", "def encode(num):\n # Check the number is within our working range\n if num > SIZE: return None\n if num < 0: return None\n\n return friendly_number(perfect_hash(num))", "def is_in_range(value: float, lower_bound: float, upper_bound: float, err_string: str) -> None:\n if value < lower_bound or value > upper_bound:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def _convert_range_boundary(boundary, test_value):\n if _is_string(boundary):\n if boundary.lower() == 'min':\n boundary = test_value - 1\n elif boundary.lower() == 'max':\n boundary = test_value + 1\n else:\n raise error.CommandDescriptionError('Invalid range boundary constant; must be \"min\", \"max\" or integer value')\n \n return boundary", "def test_int_range_constraint_validation():\n\n # Test valid values OK\n minimum = 1\n maximum = 2\n IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n\n # Test minimum must be less than maximum\n minimum = 3\n maximum = 2\n try:\n RealRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum be less than maximum\"\n except CitrinationClientError:\n pass\n\n # Test values must be castable to float\n minimum = {}\n maximum = 2\n try:\n c = IntRangeConstraint(name=\"Ingredient count\", minimum=minimum, maximum=maximum)\n assert False, \"IntRangeConstraint should require that minimum and maximum be castable to integers\"\n except CitrinationClientError:\n pass", "def test_check_digits_with_custom_alphabet_and_sign(self, number, base, expected):\n alphabet = (\"Z\", \"!\", \"T\", \"#\", \"F\", \"%\", \"S\", \"&\", \"E\", \"(\", \"0\")\n\n converted = positional.encode(number, base, alphabet=alphabet, sign_literal=\"@\")\n self.assertEqual(converted, expected)\n self.assertEqual(\n positional.decode(converted, base, alphabet=alphabet, sign_literal=\"@\"),\n number,\n )", "def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number", "def is_valid_birth_number(birth_number: int):\n if birth_number in range(1, 1000):\n return True\n else:\n return False", "def check(self, number: int) -> bool:\n return number in self.nums", "def test_serialize_number():\n num = 18178\n assert [\n 0b1000_0010,\n 0b1000_1110,\n 0b0000_0001\n ] == list(UnsignedInt.to_bytes(num))", "def sanitizeIntFromKeyboard(s,range_start=0,range_end=0):\n try:\n\tx = int(s)\n except ValueError:\n\terr = 1\n\treturn err,0\n\n if (x >= range_start) and (x <= range_end):\n\terr = 0\n return err,x\n else:\n\terr = 1\n\treturn err,x", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def is_armstrong_number(number: int) -> bool:\n return get_armstrong_value(number) == number", "def test_ushort_int_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_short, 65536)", "def valid_house_num(cls, new_num):\n if new_num > cls.POSITIVE_MIN:\n return True\n # else\n return False", "def sat(n: int, 
a=15482, b=23223, lower_bound=5):\n return a % n == 0 and b % n == 0 and n >= lower_bound", "def isRangeValid(self) -> bool:\n ...", "def _check_range(range_):\n try:\n if not isinstance(range_, list):\n range_ = list(range_)\n min_, max_ = range_\n except (ValueError, TypeError):\n raise TypeError(\"each range in ising_linear_ranges should be a list of length 2.\")\n if not isinstance(min_, Number) or not isinstance(max_, Number) or min_ > max_:\n raise ValueError((\"each range in ising_linear_ranges should be a 2-tuple \"\n \"(min, max) where min <= max\"))\n return range_", "def _is_range_boundary(boundary):\n return (isinstance(boundary, numbers.Integral) or\n (_is_string(boundary) and (boundary.lower() in ('min','max'))))", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def test_inrange():\n assert cs.any > 0\n assert cs.any < cmax", "def guid_in_range(self, guid):\n return self.range_min <= util.guid_to_num(guid) < self.range_max", "def test_same(self):\n same_int = [22, 22, 22, 22]\n self.assertEqual(max_integer(same_int), 22)", "def is_valid_range(parser, arg, minimum=0, maximum=100):\n if arg < minimum:\n parser.error(\"%s < %s\", arg, minimum)\n else:\n if arg > maximum:\n parser.error(\"%s > %s\", arg, maximum)\n\n return arg", "def __validate(self, value: int, extend_range: bool):\n if extend_range:\n bottom, top = self.getRange()\n self.setRange(min(value, bottom), max(value, top))\n return numpy.clip(value, *self.getRange())", "def icd9_in_code_range(val, code_ranges):\n return any(val <= code_range[1] and val >= code_range[0] for code_range in code_ranges)", "def test_decoder(self):\n from sosbeacon.utils import number_decode\n\n encoded = 'b6'\n number = number_decode(encoded)\n self.assertEqual(number, 123)", "def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True", "def is_palindrome(a):\n\tmax = a\n\tmin = 0\n\twhile max > 0:\n\t\tmin = (min * 10 + max % 10)\n\t\tmax /= 10\n\treturn min == a", "def is_valid_paid_value_range(value):\n\n min_valid_payment = 1\n max_valid_payment = 1_000_000\n\n if not min_valid_payment <= value <= max_valid_payment:\n raise serializers.ValidationError(\n 'Valor no permitido, debe ser estar entre 1 y 1000000'\n )\n return value", "def _convert_bound(value, lower_bound, upper_bound):\n # Converts value to 16 bit two's complement integer via bitwise.\n most_sig_bit = 0x8000\n\n # Gets the two least significant bits\n convert_val = value & _BYTE << _BYTE_SIZE | value & _BYTE\n # Extends the most significant bit if it is a 1. 
This is done by\n # carrying out the most significant bit.\n if bool(convert_val & most_sig_bit):\n convert_val |= ~(_BYTE << _BYTE_SIZE | _BYTE)\n\n # Bounds the converted value\n if convert_val > upper_bound:\n return upper_bound\n elif convert_val < lower_bound:\n return lower_bound\n return convert_val", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_ushort_int_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_short, -1)", "def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False", "def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 16:\n raise InvalidLength()\n if _calc_checksum(number) != 0:\n raise InvalidChecksum()\n i = info(number)\n if 'bank' not in i or 'branch' not in i:\n raise InvalidComponent()\n return number", "def check(self, number):\n return number in self.numbers" ]
[ "0.6504324", "0.6368809", "0.6153909", "0.612515", "0.5986749", "0.5916171", "0.590891", "0.58644295", "0.581968", "0.581036", "0.5799809", "0.57941693", "0.579414", "0.5778868", "0.57601947", "0.57461756", "0.5717209", "0.57090324", "0.5706868", "0.56949294", "0.5694811", "0.569292", "0.5677516", "0.56279796", "0.5624956", "0.56204987", "0.56110126", "0.5591352", "0.5590477", "0.5590012", "0.5590012", "0.55787504", "0.5578329", "0.5551686", "0.5546494", "0.5542476", "0.5536136", "0.55324364", "0.5528587", "0.5528065", "0.551755", "0.5511266", "0.55071354", "0.54988176", "0.5492892", "0.5491494", "0.54907995", "0.5482808", "0.54738", "0.54515207", "0.54473275", "0.544504", "0.54389024", "0.54329526", "0.54315144", "0.5421184", "0.54165876", "0.5412614", "0.54114044", "0.5398429", "0.53881246", "0.5380899", "0.5377079", "0.53682286", "0.5362297", "0.5356908", "0.53563976", "0.5355855", "0.53533274", "0.5351805", "0.5344161", "0.53425276", "0.53420573", "0.5339024", "0.5325498", "0.5296474", "0.52963305", "0.5295423", "0.52937067", "0.5289564", "0.5280752", "0.527107", "0.5258539", "0.5258483", "0.5256072", "0.52548784", "0.5251915", "0.5243512", "0.5240109", "0.5239769", "0.5233301", "0.5230943", "0.5229176", "0.52254117", "0.5225256", "0.522369", "0.5223572", "0.52234226", "0.52219933", "0.522023" ]
0.64608574
1
Ensure taskqueue.Queue.add is called exactly once.
def test_insert_batch(self, queue_mock): from sosbeacon.utils import insert_tasks tasks = [] for i in xrange(1, 10): tasks.append(object()) added = insert_tasks(tasks, 'default') self.assertEqual(added, 9)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_splits_once(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n def side_effect(*args):\n if 2 in args[0]:\n raise taskqueue.TombstonedTaskError('uh oh')\n\n queue_add_mock.side_effect = side_effect\n\n tasks = [i for i in xrange(0, 9)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 8)\n self.assertEqual(queue_add_mock.call_count, 7)", "def _add(self, task: Task) -> None:\n with self._cond:\n if task not in self._task_set:\n self._task_set.add(task)\n self._tasks.append(task)\n self._cond.notify()", "def test_splits_on_taskexists(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError\n\n tasks = [i for i in xrange(0, 10)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 19)", "def add_queue(self, queue):\n with self.mutex:\n self.queues.append(queue)", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def __add__(self, value):\n self.queue.append(value)", "def test_add(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n new_task.key = self.task_storage.add(new_task)\n\n self.assertNotEqual(self.my_task.key, new_task.key)\n self.task_storage.delete(new_task.key)", "def run_once(self):\r\n with self._run_lock:\r\n self.run(self._process_queue,True) # True: override flag for saving\r", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def test_enqueue(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.size(), 3)", "def queueOn() -> None:\n\t\tLogging.enableQueue = Logging.queueSize > 0", "def test_put_element(self):\n queue = Queue_()\n queue.put(1)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), 1)\n self.assertEqual(queue.top(), 1)", "def add(self, item):\n completeDeferred = defer.Deferred()\n self.queue.append((item, completeDeferred))", "def put(self, item: Any):\n has_item = True\n with self._lock:\n if item not in self._items:\n self._items.add(item)\n has_item = False\n if not has_item:\n self._queue.put(item)", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "async def queue_add(\n self,\n ctx: commands.Context,\n players: List[Player],\n ) -> Optional[bool]:\n\n if ctx.guild.id in self.queue:\n self.queue[ctx.guild.id].queue += players\n else:\n self.queue[ctx.guild.id] = QueueManager(self.default_volume, players)\n\n return True", "def test_splits_on_tombstoned(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TombstonedTaskError\n\n tasks = [i for i in xrange(0, 7)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 13)", "def register_queue(self, queue) -> None:\r\n if queue is None:\r\n raise ValueError('queue is None')\r\n if not hasattr(queue, 'empty'):\r\n raise ValueError(f'queue {queue} is missing empty member')\r\n if not hasattr(queue, 'get_nowait'):\r\n raise ValueError(f'queue {queue} is missing get_nowait member')\r\n 
self.receive_queues.append(queue)", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def test_is_empty(self):\n queue = Queue()\n self.assertEqual(queue.is_empty(), True)\n queue.enqueue(1)\n self.assertEqual(queue.is_empty(), False)", "def test_add(self, publish_mock: Mock) -> None:\n\n self.plugin.add(\"foo\", \"bar\")\n\n publish_mock.assert_called_with(\n \"scheduler:add\",\n 1,\n \"applog:pull\"\n )\n\n self.assertEqual(\n len(self.plugin.queue),\n 1\n )", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "def only_once(self) -> bool:\n return self.times == 1", "def add(self, task):\n pass", "def add_task(self, func, *args, **kwargs):\n self.queue.put((func, args, kwargs))", "def block( self, fn ):\n\n if type(fn) == list:\n self._once += fn\n else:\n self._once.append( fn )", "def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)", "def tidyUp():\n\n global queue\n queue.put(0)\n\n pass", "def tidyUp():\n\n global queue\n queue.put(0)\n\n pass", "def _add_to_queue(self, tok):\n if self._genpostfix:\n self._queue.append(tok)", "def enqueue_task(signature):\n try:\n if signature not in g._celery_tasks:\n g._celery_tasks.append(signature)\n except RuntimeError:\n signature()", "def _put(self, item, queue):", "def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def test_insertion_of_value_increases_length(empty_queue):\n assert len(empty_queue) == 0\n empty_queue.enqueue(100)\n assert len(empty_queue) == 1", "def enqueue(self, data):\n # Checking to avoid duplicate entry (not mandatory)\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False", "def assert_empty(self):\n if self._queue:\n raise CallQueueNotEmpty(\n \"Queue is not empty; {0} expected calls remaining.\"\n .format(len(self._queue))\n )", "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def any(self) -> bool:\n return len(self.queue) > 0", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def post(self, event):\n self.queue.append(event)\n if not self.timer.active:\n self.timer.start(0)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def _add_to_queue(key, value, step, time, run_id):\n met = Metric(key=key, value=value, timestamp=time, step=step)\n _metric_queue.append((run_id, met))\n if len(_metric_queue) > _MAX_METRIC_QUEUE_SIZE:\n _thread_pool.submit(_flush_queue)", "def test_bucketEmpty(self):\n b = SomeBucket()\n b.add(20)\n self.clock.set(9)\n empty = 
b.drip()\n self.assertFalse(empty)\n self.clock.set(10)\n empty = b.drip()\n self.assertTrue(empty)", "def __post_init__(self) -> None:\n self.gtex += [None]\n self.bm += [None]\n self._q: queue.Queue = queue.Queue(maxsize=self.maxsize)", "def testQueueFlushQueue(self):\n self.mgr.sendState = Mock()\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(1)\n self.mgr.queueMsg(2)\n self.assertEqual(self.mgr.msgQueue.qsize(), 2)\n self.mgr.lastRequestSent = monotonic.monotonic() - 3.0\n self.mgr.queueMsg(3)\n self.assertTrue(self.mgr.msgQueue.empty)\n self.mgr.sendState.assert_called_with()", "def reAddJob(self, runner):\n with self.__queueLock:\n if not runner.clientRunner:\n self.__queue.append(runner)\n else:\n self.__clientQueue.append(runner)\n if self.__profileJobs:\n runner.trackTime('queue')\n self.__submittedJobs.append(runner.identifier)", "def add_event_queue(self, proc):\r\n self.event_queue.append(proc)\r\n if not self.running:\r\n self.mw.after(self.run_check_ms, self.running_loop) # Start running loop\r", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def __init__(self):\n self._queue_items = []", "def isEmpty(self):\r\n if (len(self.queue) >= 1):\r\n return False\r\n else:\r\n return True", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def add_request_to_queue(self,request):\n self.queue.append(request)", "def _run_once(self):\n try:\n self.do_wait()\n self._execute_wakeup_tasks()\n self._trigger_timers()\n except Exception as e:\n Log.error(\"Error occured during _run_once(): \" + e.message)\n Log.error(traceback.format_exc())\n self.should_exit = True", "def add(self, task, qhigh, qlow):\n try:\n qlen = self.tasks.qsize()\n if qlen > qhigh:\n print \"Throttling input, reached HWM:\", qhigh\n while qlen > qlow:\n delay = random.randint(1,10)\n time.sleep(delay)\n qlen = self.tasks.qsize()\n print \"Throttling released, down to LWM:\", qlow\n except NotImplementedError:\n # Skip on Mac OS X (WARNING - use on OS X in testing only, queue \n # size will max out at a paltry 32768 items)\n pass\n try:\n self.tasks.put(task)\n self.recordsProcessed += task.datalen\n except qFull:\n # While testing: we shouldn't hopefully end up here...\n print \"ERR: queue full\"\n sys.exit(-1)", "def test_size(self):\n queue = Queue()\n self.assertEqual(queue.size(), 0)\n queue.enqueue(1)\n self.assertEqual(queue.size(), 1)", "def queue_append(self, obj, value):\n self.queue.append((obj, value))\n if len(self.queue) > self.queue_size:\n self.dump_queue()", "def test_the_queue_size(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n assert the_queue.size() == 3", "def test_dequeue(self):\n queue = Queue()\n self.assertEqual(queue.dequeue(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.dequeue(), 1)\n self.assertEqual(queue.size(), 2)", "def test_flush(queue):\n queue.put('oops')\n queue.flush()\n assert queue.get(timeout=1) is None", "def test_OpenCloseOneHundred(self):\n\n q = Queue(self.path)\n for i in 
range(1000):\n q.put('var%d' % i)\n del q\n q = Queue(self.path)\n self.assertEqual(1000, q.qsize())\n for i in range(1000):\n data = q.get()\n self.assertEqual('var%d' % i, data)\n q.task_done()\n with self.assertRaises(Empty):\n q.get_nowait()\n # assert adding another one still works\n q.put('foobar')\n data = q.get()", "def update_usage_queue(self, md5_hash):\n if md5_hash in self.usage_queue:\n self.remove_from_usage_queue(md5_hash)\n self.usage_queue.append(md5_hash)", "def put(self, task):\n self.put_idx += 1\n self.task_queue.put(task)", "def add(self, data):\n wasquiet = True if (self.tail == self.curr) else False\n\n # Assert the queue is clean\n qtail = self.base + \".\" + str(self.tail)\n print \"creating %s\" % qtail\n assert not os.path.exists(qtail)\n qt = open(qtail, \"w\")\n qt.write(data)\n qt.close()\n\n # Where does the next item go\n self.tail += 1\n self._settail(self.tail)\n\n return wasquiet", "def add(self, x):\n if x not in self:\n self._seen.add(x)\n self._list.append(x)\n return True\n return False", "def put(self, task):\n\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if task.unique:\n # first lets check if we have this hash already in our queue\n if not self.__db.sismember(self._lock_key, task.unique_hash()):\n self.__db.sadd(self._lock_key, task.unique_hash())\n else:\n raise TaskAlreadyInQueueException(\n 'Task already in Queue [{hash}]'.format(\n hash=task.unique_hash()))\n\n self.__db.lpush(self._key, task.to_json())\n\n return True", "def test_GarbageOnHead(self):\n\n q = Queue(self.path)\n q.put('var1')\n del q\n\n with open(os.path.join(self.path, 'q00001'), 'a') as fd:\n fd.write('garbage')\n\n q = Queue(self.path)\n q.put('var2')\n\n self.assertEqual(2, q.qsize())\n self.assertEqual('var1', q.get())\n q.task_done()", "def _pump_once(self):\n if self._pumping:\n self._flush()\n self._global_reactor.callLater(0.1, self._pump_once)\n else:\n for d in self._waiting_for_stop:\n d.callback(None)\n self._waiting_for_stop = []", "def check_ack_queue(self):\r\n try:\r\n while True:\r\n ack = self.ack_queue.get_nowait()\r\n self.handle_ack(ack)\r\n except queue.Empty:\r\n pass", "def put(self, task):\n self.put_id += 1\n self.task_queue.put(task)", "def non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n return False", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def addToBq(self, p):\n self.allJobs.append(p)\n p.submitToQueue()\n if len(self.circQ) < self.maxLength - 1:\n self.circQ.append(p)", "def addToBq(self, p):\n self.allJobs.append(p)\n p.submitToQueue()\n if len(self.circQ) < self.maxLength - 1:\n self.circQ.append(p)", "def __init__(self):\r\n self.queue = []", "def __init__(self):\r\n self.queue = []", "def addTask(self, task):\n # Try to start task first. 
If it fails then we don't need to\n # undo adding it to taskset\n task.initialize(self)\n task.start()\n\n with self._lock_c:\n self.numtasks += 1\n self.taskset.append(task)", "def take_task(self, task):\n self._tasks_in_process.append(task)", "def empty(self):\r\n return self.queue == []", "def event_check(self):\r\n if len(self.event_queue) > 0:\r\n event = self.event_queue.pop(0) # oldest\r\n self.event_queue_proc(event)\r\n return True\r\n return False", "def __init__(self) -> None:\n self._queue = []", "def bypass_queue(self, name):\n # self.queue = [name] + self.queue\n # self.queue.insert(0, name)\n\n # self.lst = [name] + self.lst # This person is brought to the front of the queue\n self.lst.insert(0, name) #Not constant time as the pointer is moved for all the members of the queue, 0(n)\n print(f\"{name} has bypassed the queue\")", "def test_start_already_running(self, mock_add_job, mock_get_job):\n mock_get_job.return_value = MagicMock()\n\n result = self.aggregator.start(self.node_id)\n\n self.assertFalse(result)\n self.assertFalse(mock_add_job.called)", "def clear(self):\n self.queue = Queue()", "def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2", "def on_add(self):\n self.notify(on_add())", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def __init__(self): \n self.queue = []", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False", "def push(self):\n return False", "def add(self, schedule):\n try:\n if schedule in self.set:\n self.log.error(\"%s has already been added to this Scheduler.\" %\n schedule)\n return\n self.log.debug('Adding %s to timer for %s.' %\n (schedule, schedule.next))\n self.timer.add_task(schedule.next, self._enqueue, [schedule])\n self.set.add(schedule)\n except:\n self.log.error(\n \"Invalid schedule %s found, deleting.\" % schedule)\n schedule.soft_delete()", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))", "def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True", "def enqueue(self, element):\n self.the_queue.append(element)", "def _unthreadsafe_add_connection(self, con):\n self._queue.put(con)" ]
[ "0.6944164", "0.65657383", "0.6554443", "0.62363005", "0.6164665", "0.6146997", "0.608468", "0.6042163", "0.6031211", "0.5978489", "0.59758395", "0.5960709", "0.5934469", "0.5904743", "0.5876805", "0.58512455", "0.5846621", "0.58196574", "0.58075684", "0.58046085", "0.5796316", "0.5787596", "0.5782536", "0.575451", "0.573472", "0.5732381", "0.5731907", "0.5722042", "0.56501263", "0.56501263", "0.56406015", "0.5635305", "0.5623517", "0.5609142", "0.55958253", "0.55853766", "0.5579906", "0.55755174", "0.5557872", "0.5542254", "0.5536369", "0.55360776", "0.553384", "0.5526282", "0.5521619", "0.551617", "0.5508045", "0.55025744", "0.55009687", "0.5496189", "0.5492586", "0.5483445", "0.54798937", "0.5478269", "0.5476806", "0.54651433", "0.54642725", "0.5456674", "0.5451647", "0.5450282", "0.5438065", "0.54294777", "0.54277074", "0.54245776", "0.54242045", "0.542164", "0.5421114", "0.54111725", "0.54008496", "0.5398257", "0.53981525", "0.5394292", "0.5391527", "0.5391527", "0.53884697", "0.53884697", "0.5385764", "0.5385504", "0.5383751", "0.53810287", "0.53743297", "0.53646255", "0.5357164", "0.5355557", "0.5354349", "0.53495336", "0.53450966", "0.53450966", "0.53318244", "0.53292316", "0.532768", "0.5326004", "0.532336", "0.532336", "0.532336", "0.532336", "0.532336", "0.5321367", "0.53208274", "0.53206176", "0.531844" ]
0.0
-1
Ensure task batches are split and insertion is retried on TaskAlreadyExistsError.
def test_splits_once(self, queue_add_mock): from google.appengine.api import taskqueue from sosbeacon.utils import insert_tasks def side_effect(*args): if 2 in args[0]: raise taskqueue.TombstonedTaskError('uh oh') queue_add_mock.side_effect = side_effect tasks = [i for i in xrange(0, 9)] added = insert_tasks(tasks, 'default') self.assertEqual(added, 8) self.assertEqual(queue_add_mock.call_count, 7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_splits_on_taskexists(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError\n\n tasks = [i for i in xrange(0, 10)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 19)", "def test_splits_on_tombstoned(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TombstonedTaskError\n\n tasks = [i for i in xrange(0, 7)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 13)", "def test_block_missing_batch(self):\n pass", "def test_no_chunk_size_no_n_splits_provided(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks([]))", "def _bulk_create_with_retry(\n self, table: IndexerTable, new_records: Sequence[BaseIndexer]\n ) -> None:\n retry_count = 0\n sleep_ms = 5\n last_seen_exception: Optional[BaseException] = None\n\n with metrics.timer(\"sentry_metrics.indexer.pg_bulk_create\"):\n # We use `ignore_conflicts=True` here to avoid race conditions where metric indexer\n # records might have be created between when we queried in `bulk_record` and the\n # attempt to create the rows down below.\n while retry_count + 1 < settings.SENTRY_POSTGRES_INDEXER_RETRY_COUNT:\n try:\n table.objects.bulk_create(new_records, ignore_conflicts=True)\n return\n except OperationalError as e:\n sentry_sdk.capture_message(\n f\"retryable deadlock exception encountered; pgcode={e.pgcode}, pgerror={e.pgerror}\"\n )\n if e.pgcode == DEADLOCK_DETECTED:\n metrics.incr(\"sentry_metrics.indexer.pg_bulk_create.deadlocked\")\n retry_count += 1\n sleep(sleep_ms / 1000 * (2**retry_count))\n last_seen_exception = e\n else:\n raise e\n # If we haven't returned after successful bulk create, we should re-raise the last\n # seen exception\n assert isinstance(last_seen_exception, BaseException)\n raise last_seen_exception", "async def test_delete_batch_invalid(database,valid_data):\n test_valid_insert_batch(database,valid_data)\n N = 10\n batch_id = 1\n for idx in range(N+1,N*2):\n try:\n await database.delete_batch(batch_id=batch_id,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_block_bad_batch(self):\n pass", "def test_block_missing_batch_dependency(self):\n pass", "def test_insert_batch(self, queue_mock):\n from sosbeacon.utils import insert_tasks\n\n tasks = []\n for i in xrange(1, 10):\n tasks.append(object())\n added = insert_tasks(tasks, 'default')\n self.assertEqual(added, 9)", "async def create_checkpoint_if_not_exists_async(self, partition_id):", "def test_integrity_error_bulk_create(self):\n link1, link2 = LinkFactory.create_batch(2)\n self.service.get_clicks_for_date.return_value = {\n unicode(link1.pk): '4',\n unicode(link2.pk): '7'\n }\n\n with patch.object(collect_ga_data, 'DataPoint') as MockDataPoint:\n MockDataPoint.objects.bulk_create.side_effect = IntegrityError\n\n with self.assertRaises(CommandError):\n self.command.execute()", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already 
exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def testTrainSplitError(self):\n with self.assertRaises(Exception):\n mnli.MnliDataset(\n mode='mismatched',\n split=tfds.Split.TRAIN,\n shuffle_buffer_size=20)", "def test_block_batches_order(self):\n pass", "def test_instantiating_salesforce_bulk_job_validates_operation(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('floob', 'Lead')", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_bulk_round_trip_with_backoff(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=250000,\n copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})", "def test_task_preloading(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task1.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task2 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task2.get('info'), task2\r\n # Check that both tasks are different\r\n assert task1.get('id') != task2.get('id'), \"Tasks should be different\"\r\n ## Save the assigned task\r\n assigned_tasks.append(task1)\r\n assigned_tasks.append(task2)\r\n\r\n # Submit an Answer for the assigned and pre-loaded task\r\n for t in assigned_tasks:\r\n tr = dict(app_id=t['app_id'], task_id=t['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # Get two tasks again\r\n res = self.app.get('api/app/1/newtask')\r\n task3 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task3.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task4 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task4.get('info'), task2\r\n # Check that both tasks are different\r\n assert task3.get('id') != task4.get('id'), \"Tasks should be different\"\r\n assert task1.get('id') != task3.get('id'), \"Tasks should be different\"\r\n assert task2.get('id') != task4.get('id'), \"Tasks should be different\"\r\n # Check that a big offset returns None\r\n res = self.app.get('api/app/1/newtask?offset=11')\r\n assert json.loads(res.data) == {}, res.data", "def test_task_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n user = 
db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=None)\r\n db.session.add(task)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def acknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, received_responses=0)", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_default_maximum_conflict(self):\n\n self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)\n task = self.create_task(self.context())\n with self.assertRaises(ErrorWhileTesting):\n task.execute()", "def test_task_run_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=app.id)\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n task_run = TaskRun(app_id=None, task_id=task.id)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n task_run = TaskRun(app_id=app.id, task_id=None)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_synchronize_splits_error(self, mocker):\n storage = mocker.Mock(spec=SplitStorage)\n api = mocker.Mock()\n\n def run(x, c):\n raise APIException(\"something broke\")\n run._calls = 0\n api.fetch_splits.side_effect = run\n storage.get_change_number.return_value = -1\n\n from splitio.sync.split import SplitSynchronizer\n split_synchronizer = SplitSynchronizer(api, storage)\n\n with pytest.raises(APIException):\n split_synchronizer.synchronize_splits(1)", "def test_rollbacked_transaction_discard_task(self):\n\n @transaction.commit_on_success\n def do_something():\n my_task.delay()\n raise SpecificException\n try:\n do_something()\n except SpecificException:\n self.assertFalse(my_global)\n else:\n self.fail('Exception not raised')", "def retry_failed(FailAdmin, request, queryset):\n for task in queryset:\n async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})\n task.delete()", "def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT 
INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, received_responses=2)", "def test_loopFailure_recovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 1)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 1)\n self.assertGreater(jobs[0].notBefore, datetime.datetime.utcnow() + datetime.timedelta(seconds=30))", "def _skip_aware_next_available_task_id(request, batch):\n def _get_skipped_task_ids_for_batch(session, batch_id):\n batch_id = str(batch_id)\n if 'skipped_tasks_in_batch' in session and \\\n batch_id in session['skipped_tasks_in_batch']:\n return session['skipped_tasks_in_batch'][batch_id]\n else:\n return None\n\n available_task_ids = batch.available_task_ids_for(request.user)\n skipped_ids = _get_skipped_task_ids_for_batch(request.session, batch.id)\n\n if skipped_ids:\n task_id = available_task_ids.exclude(id__in=skipped_ids).first()\n if not task_id:\n task_id = available_task_ids.filter(id__in=skipped_ids).first()\n if task_id:\n messages.info(request, 'Only previously skipped Tasks are available')\n\n # Once all remaining Tasks have been marked as skipped, we clear\n # their skipped status. 
If we don't take this step, then a Task\n # cannot be skipped a second time.\n request.session['skipped_tasks_in_batch'][str(batch.id)] = []\n request.session.modified = True\n else:\n task_id = available_task_ids.first()\n\n return task_id", "def bulkCreateTasks(request, *args, **kwargs):\n import settings\n\n # keep track of our own timelimit (20 seconds)\n timelimit = 20000\n timekeeper = Timekeeper(timelimit)\n\n post_dict = request.POST\n\n bulk_create_key = post_dict.get('bulk_create_key')\n if not bulk_create_key:\n return error_handler.logErrorAndReturnOK(\n 'Not all POST data specified in: %s' % post_dict)\n\n bulk_data = bulk_create_model.GCIBulkCreateData.get(bulk_create_key)\n if not bulk_data:\n return error_handler.logErrorAndReturnOK(\n 'No valid data found for key: %s' % bulk_create_key)\n\n # note that we only query for the quota once\n org_admin = bulk_data.created_by\n task_quota = org_logic.getRemainingTaskQuota(org_admin.scope)\n\n tasks = bulk_data.tasks\n while len(tasks) > 0:\n try:\n # check if we have time\n timekeeper.ping()\n\n if settings.GCI_TASK_QUOTA_LIMIT_ENABLED and task_quota <= 0:\n return error_handler.logErrorAndReturnOK(\n 'Task quota reached for %s' %(org_admin.scope.name))\n\n # remove the first task\n task_as_string = tasks.pop(0)\n\n loaded_task = simplejson.loads(task_as_string)\n task = {}\n for key, value in loaded_task.iteritems():\n # If we don't do this python will complain about kwargs not being\n # strings when we try to save the new task.\n task[key.encode('UTF-8')] = value\n\n logging.info('Uncleaned task: %s' %task)\n # clean the data\n errors = _cleanTask(task, org_admin)\n\n if errors:\n logging.warning(\n 'Invalid task data uploaded, the following errors occurred: %s'\n %errors)\n bulk_data.errors.append(db.Text(\n 'The task in row %i contains the following errors.\\n %s' \\\n %(bulk_data.tasksRemoved(), '\\n'.join(errors))))\n\n # at-most-once semantics for creating tasks\n bulk_data.put()\n\n if errors:\n # do the next task\n continue\n\n # set other properties\n task['link_id'] = 't%i' % (int(time.time()*100))\n task['scope'] = org_admin.scope\n task['scope_path'] = org_admin.scope_path\n task['program'] = org_admin.program\n task['status'] = 'Unpublished'\n task['created_by'] = org_admin\n task['modified_by'] = org_admin\n\n # create the new task\n logging.info('Creating new task with fields: %s' %task)\n task_logic.updateOrCreateFromFields(task)\n task_quota = task_quota - 1\n except DeadlineExceededError:\n # time to bail out\n pass\n\n if len(tasks) == 0:\n # send out a message\n notifications.sendBulkCreationCompleted(bulk_data)\n bulk_data.delete()\n else:\n # there is still work to be done, do a non 500 response and requeue\n task_params = {\n 'bulk_create_key': bulk_data.key().id_or_name()\n }\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')\n\n # we're done here\n return http.HttpResponse('OK')", "def transaction_failed_before_processing(self):", "async def test_delete_batch_valid(database, valid_data):\n await test_valid_insert_batch(database,valid_data)\n database = await Database.connect_pool()\n N = 10\n batch_id = 1\n for idx in range(N):\n await database.delete_batch(batch_id=batch_id,user_id=str(idx))\n await database.close_pool()", "def validate_batch(self, *args, **kwargs):\n raise NotImplementedError()", "def logged_batch_throws_uae_test(self):\n cursor = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in 
self.cluster.nodelist()[1:] ]\n cursor.consistency_level = 'ONE'\n assert_unavailable(cursor.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def test_re_split_cell_raises_error(mock_amg):\n\n # splitting once should work as normal\n mock_amg.cells[4].split()\n\n with pytest.raises(ValueError):\n mock_amg.cells[4].split()", "async def _clean_up_batch_of_old_cache_invalidations(\n self, delete_up_to_millisec: int\n ) -> bool:\n\n def _clean_up_batch_of_old_cache_invalidations_txn(\n txn: LoggingTransaction,\n ) -> bool:\n # First get the earliest stream ID\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\"\n )\n row = txn.fetchone()\n if row is None:\n return False\n earliest_stream_id: int = row[0]\n\n # Then find the last stream ID of the range we will delete\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n WHERE stream_id <= ? AND invalidation_ts <= ?\n ORDER BY stream_id DESC\n LIMIT 1\n \"\"\",\n (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec),\n )\n row = txn.fetchone()\n if row is None:\n return False\n cutoff_stream_id: int = row[0]\n\n # Determine whether we are caught up or still catching up\n txn.execute(\n \"\"\"\n SELECT invalidation_ts FROM cache_invalidation_stream_by_instance\n WHERE stream_id > ?\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\",\n (cutoff_stream_id,),\n )\n row = txn.fetchone()\n if row is None:\n in_backlog = False\n else:\n # We are in backlog if the next row could have been deleted\n # if we didn't have such a small batch size\n in_backlog = row[0] <= delete_up_to_millisec\n\n txn.execute(\n \"\"\"\n DELETE FROM cache_invalidation_stream_by_instance\n WHERE ? 
<= stream_id AND stream_id <= ?\n \"\"\",\n (earliest_stream_id, cutoff_stream_id),\n )\n\n return in_backlog\n\n return await self.db_pool.runInteraction(\n \"clean_up_old_cache_invalidations\",\n _clean_up_batch_of_old_cache_invalidations_txn,\n )", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def test_loopFailure_failedRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n @inlineCallbacks\n def _failedToRun(self, locked=False, delay=None):\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"failedToRun\", _failedToRun)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "def test_import_error_record_is_updated_not_deleted_and_recreated(self, tmpdir):\n filename_to_parse = os.path.join(tmpdir, TEMP_DAG_FILENAME)\n\n # Generate original import error\n with open(filename_to_parse, \"w\") as file_to_parse:\n file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)\n session = settings.Session()\n self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)\n\n import_error_1 = (\n session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()\n )\n\n # process the file multiple times\n for _ in range(10):\n self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)\n\n import_error_2 = (\n session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()\n )\n\n # assert that the ID of the import error did not change\n assert import_error_1.id == import_error_2.id", "def test_delete_task_cascade(self):\r\n task = 
TaskFactory.create()\r\n task_runs = TaskRunFactory.create_batch(3, task=task)\r\n url = '/api/task/%s?api_key=%s' % (task.id, task.app.owner.api_key)\r\n res = self.app.delete(url)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n task_runs = db.session.query(TaskRun)\\\r\n .filter_by(task_id=task.id)\\\r\n .all()\r\n assert len(task_runs) == 0, \"There should not be any task run for task\"", "def test_retry_errors_sooner(self):\n config_manager, json_file = self._setup_config_manager(\n 'socorro.unittest.cron.test_crontabber.BarBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBarBackfillJob|1d',\n extra_value_source={\n # crontabber already has a good default for this but by\n # being explict like this we not only show that it can be\n # changed, we also make it clear what the unit test is\n # supposed to do.\n 'crontabber.error_retry_time': '3600' # 1 hour\n }\n )\n\n # first we need to hack-about so that BarBackfillJob fails only\n # once.\n\n class SomeError(Exception):\n pass\n\n def nosy_run(self, date):\n dates_used[self.__class__].append(date)\n if self.__class__ == BarBackfillJob:\n if len(dates_used[self.__class__]) == 1:\n # first time run, simulate trouble\n raise SomeError(\"something went wrong\")\n return originals[self.__class__](self, date)\n\n classes = BarBackfillJob, FooBackfillJob, FooBarBackfillJob\n originals = {}\n dates_used = collections.defaultdict(list)\n for klass in classes:\n originals[klass] = klass.run\n klass.run = nosy_run\n\n try:\n with config_manager.context() as config:\n tab = crontabber.CronTabber(config)\n tab.run_all()\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n # never gets there because dependency fails\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 0)\n\n structure = json.load(open(json_file))\n assert structure['foo-backfill']\n assert not structure['foo-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(days=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['foo-backfill']['next_run']\n )\n\n assert structure['bar-backfill']\n assert structure['bar-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(hours=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['bar-backfill']['next_run']\n )\n\n assert 'foobar-backfill' not in structure\n\n # Now, let the magic happen, we pretend time passes by 2 hours\n # and run all jobs again\n self._wind_clock(json_file, hours=2)\n # this forces in crontabber instance to reload the JSON file\n tab._database = None\n\n # here, we go two hours later\n tab.run_all()\n\n # Here's the magic sauce! 
The FooBarBackfillJob had to wait\n # two hours to run after FooBackfillJob but it should\n # have been given the same date input as when FooBackfillJob\n # ran.\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 1)\n\n # use this formatter so that we don't have to compare\n # datetimes with microseconds\n format = lambda x: x.strftime('%Y%m%d %H:%M %Z')\n self.assertEqual(\n format(dates_used[FooBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n # also check the others\n self.assertEqual(\n format(dates_used[BarBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n\n structure = json.load(open(json_file))\n self.assertTrue(structure['foo-backfill'])\n self.assertTrue(not structure['foo-backfill']['last_error'])\n self.assertTrue(structure['bar-backfill'])\n self.assertTrue(not structure['bar-backfill']['last_error'])\n self.assertTrue(structure['foobar-backfill'])\n self.assertTrue(not structure['foobar-backfill']['last_error'])\n\n finally:\n for klass in classes:\n klass.run = originals[klass]", "async def test_delete_invalid(database,valid_data):\n test_valid_insert(database,valid_data)\n N = 10\n for idx in range(N+1,N*2):\n try:\n await database.delete(_id=idx,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_loopFailure_noRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldNextJob = JobItem.nextjob\n @inlineCallbacks\n def _nextJob(cls, txn, now, minPriority, rowLimit):\n job = yield oldNextJob(txn, now, minPriority, rowLimit)\n work = yield job.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"nextjob\", classmethod(_nextJob))\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). 
The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "async def test_invalid_insert_user_duplicate_key(database):\n await database.setup_database(reset=True)\n await database.insert_user(\"\")\n for user_id in zip([\"1\" for _ in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_sync_object_task_retries_on_error(monkeypatch, es):\n sync_object_mock = Mock(side_effect=[Exception, None])\n monkeypatch.setattr('datahub.search.sync_object.sync_object', sync_object_mock)\n\n sync_object_task.apply(args=(SimpleModelSearchApp.name, str(uuid4())))\n\n assert sync_object_mock.call_count == 2", "def _test_undefined_problem(self, task_class):\r\n task_entry = self._create_input_entry()\r\n with self.assertRaises(ItemNotFoundError):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)", "async def test_invalid_insert_duplicate_key(database, duplicate_data):\n await database.setup_database(reset=True)\n await database.insert_user(\"1\")\n\n await database.insert(id=1,user_id=\"1\",embeddings=[1,2])\n for id,user_id,embeddings,batch_id in duplicate_data:\n try:\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def logged_batch_throws_uae_test(self):\n session = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n session.consistency_level = 'ONE'\n assert_unavailable(session.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)", "def aknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n cursor = self.prepare(nodes=3)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, acknowledged_by_batchlog=False)", "def test_raise_enum_upsert_schema_error(data_row, mdo):\n\n metadata = DataRowMetadata(data_row_id=data_row.uid,\n fields=[\n DataRowMetadataField(schema_id=TEST_SPLIT_ID,\n value=SPLIT_SCHEMA_ID),\n ])\n with pytest.raises(ValueError):\n mdo.bulk_upsert([metadata])", "def test_failedUpload(self):\n def _storeObject(content, 
contentType, metadata={}, created=None,\n objectId=None):\n raise ValueError('blah blah')\n object.__setattr__(self.backendStore, 'storeObject', _storeObject)\n\n nextScheduled = self.pendingUpload.scheduled + timedelta(minutes=5)\n def _nextAttempt():\n return nextScheduled\n object.__setattr__(self.pendingUpload, '_nextAttempt', _nextAttempt)\n\n self.successResultOf(self.pendingUpload.attemptUpload())\n self.assertIdentical(self.store.findUnique(_PendingUpload),\n self.pendingUpload)\n self.assertEquals(self.pendingUpload.scheduled,\n nextScheduled)\n errors = self.flushLoggedErrors(ValueError)\n self.assertEquals(len(errors), 1)", "def test_after_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 30)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 40)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 50)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are after each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'inject')", "async def test_valid_insert_batch(database,valid_data):\n await database.setup_database(reset=True)\n data = []\n for _id,user_id,embeddings,batch_id in valid_data: #pylint: disable=unused-variable\n await database.insert_user(user_id)\n data.append((_id,user_id,embeddings,1))\n await database.insert_batch(data)\n await database.close_pool()", "def _enqueue_task(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.FEED_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/pull_feeds',\n\t\t\t\t\t\teta=self.eta,\n\t\t\t\t\t\tparams={'topic': self.topic}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept (taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to fetch topic = %s',\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "def test_before_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('set', 'abcd', 0)])\n intkey_batch2 = 
create_batch(self.signer, [('inc', 'abcd', 10)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 20)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are before each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'abcd')", "def test_bulk_round_trip_with_timeouts(self):\n self._test_bulk_round_trip(nodes=1, partitioner=\"murmur3\", num_operations=100000,\n configuration_options={'range_request_timeout_in_ms': '200',\n 'write_request_timeout_in_ms': '100'},\n copy_from_options={'MAXINSERTERRORS': -1},\n skip_count_checks=True)", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "def test_check_chunk_n(self):\n st, frontend_setup = self.get_st_and_fill_frontends()\n\n sf = st.storage[0]\n st_new = st.new_context()\n st_new.storage = [sf]\n key = st_new.key_for(self.run_id, self.target)\n backend, backend_key = sf.find(key, **st_new._find_options)\n prefix = strax.storage.files.dirname_to_prefix(backend_key)\n md = st_new.get_metadata(self.run_id, self.target)\n md['chunks'][0]['n'] += 1\n md_path = os.path.join(backend_key, f'{prefix}-metadata.json')\n with open(md_path, \"w\") as file:\n json.dump(md, file, indent=4)\n\n with self.assertRaises(strax.DataCorrupted):\n assert st_new.is_stored(self.run_id, self.target)\n st_new.get_array(self.run_id, self.target)", "def test_anonymous_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = AnonymousTaskRunFactory.create(task=task)\r\n taskrun2 = AnonymousTaskRunFactory.build(task=task)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').create,\r\n taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = AnonymousTaskRunFactory.build(task=task2)\r\n assert_not_raises(Exception,\r\n getattr(require, 
'taskrun').create,\r\n taskrun3)", "def reFragmentMissedTasks(missedTasks, options):\n options.chunk=1+(options.chunk*len(missedTasks)/options.splits)\n temporaryLocation=\"%s%stmp\"%(options.tmpDir,os.sep)\n os.makedirs(temporaryLocation)\n\n fileType = getFileType(options, None)\n\n # create a fileHandle-like object that will read all missed fragments\n inputsToReFragment=[getFragmentPath(options.tmpDir, options.fragBase, i) for i in missedTasks]\n logging.info(\"Restarting fragments: %s\" % missedTasks)\n logging.debug(\"Restarting fragments: %s\" % inputsToReFragment)\n failedRecordStream = fileinput.input(inputsToReFragment)\n\n # create new fragments in temporaryLocation\n newFragNum=fragmentInputStreamBySize(failedRecordStream, temporaryLocation,\n options.chunk, fileType,\n options.fragBase,\n splitOnSize=options.splitOnSize,\n suffix=options.fragSuff)\n\n # remove old fragments\n for i in missedTasks:\n frag = getFragmentPath(options.tmpDir, options.fragBase, i)\n os.remove(frag)\n\n return newFragNum+1", "def test_dupe_urls(self):\n with self.assertRaises(IntegrityError) as context:\n Bookmark.objects.create(name=\"Bookmark 2\",\n url=\"http://www.example.com\")\n self.assertTrue('UNIQUE constraint failed' in context.exception)", "def aknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n cursor = self.prepare(nodes=3)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, acknowledged_by_batchlog=True)", "def test_batch_passed_through(self, get_task_mock, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_key = Mock()\n group_key.id.return_value = 'SomeGroup'\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups([group_key], event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)\n\n get_task_mock.assert_called_once_with(\n group_key, event_key, message_key, '')", "def test_retry_run(self):\n pass", "async def test_invalid_insert_no_user(database, valid_data):\n await database.setup_database(reset=True)\n for id,user_id,embeddings,batch_id in valid_data:\n try:\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert False\n except(NotFoundError, DuplicateKeyError):\n assert True\n await database.close_pool()", "def test_exceptionWhenWorking(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # Error\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=2, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 1, 3: 2})", "def 
test_exceptionUnassign(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow())", "def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", "def test_add(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n new_task.key = self.task_storage.add(new_task)\n\n self.assertNotEqual(self.my_task.key, new_task.key)\n self.task_storage.delete(new_task.key)", "def test_cant_allocate_partitioner(self):\n def just_raise(*a, **kw):\n raise ValueError(\"Something went wrong!\")\n self.client.SetPartitioner = just_raise\n partitioner = self.tx_client.SetPartitioner(\"xyzzy\", set([1, 2, 3]))\n self.assertTrue(partitioner.failed)", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def _createOwnPartition(self, databaseCursor, uniqueItems):\n self.logger.debug(\"%s - in createOwnPartition for %s\",threading.currentThread().getName(),self.name)\n for x in uniqueItems:\n #self.logger.debug(\"DEBUG - item value is %s\",x)\n partitionCreationParameters = self.partitionCreationParameters(x)\n partitionName = self.partitionNameTemplate % partitionCreationParameters[\"partitionName\"]\n if partitionWasCreated(partitionName):\n #self.logger.debug(\"DEBUG - skipping creation of %s\",partitionName)\n continue\n partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters\n #self.logger.debug(\"%s - Sql for %s is %s\",threading.currentThread().getName(),self.name,partitionCreationSql)\n aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)\n self.logger.debug(\"%s - savepoint createPartitions_%s\",threading.currentThread().getName(), partitionName)\n databaseCursor.execute(\"savepoint 
createPartitions_%s\" % partitionName)\n try:\n self.logger.debug(\"%s - creating %s\", threading.currentThread().getName(), partitionName)\n aPartition._createSelf(databaseCursor)\n markPartitionCreated(partitionName)\n self.logger.debug(\"%s - successful - releasing savepoint\", threading.currentThread().getName())\n databaseCursor.execute(\"release savepoint createPartitions_%s\" % partitionName)\n except pg.ProgrammingError, x:\n self.logger.debug(\"%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s\", threading.currentThread().getName(), partitionName, str(x).strip())\n databaseCursor.execute(\"rollback to createPartitions_%s; release savepoint createPartitions_%s;\" % (partitionName, partitionName))\n databaseCursor.connection.commit()", "def test_aborting_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n\n assert created_job.is_open\n\n created_job.abort()\n\n assert not created_job.job\n assert not created_job.job_url\n assert not created_job.pending_batches\n assert not created_job.is_open\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID',\n data=XMLMatcher('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>Aborted</state>\n </jobInfo>\n '''),\n expected_response=200\n )", "def test_delete_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)", "def validate_batch(self, *arg, **kwargs):\n pass", "def logged_batch_doesnt_throw_uae_test(self):\n cursor = self.prepare(nodes=3)\n self.cluster.nodelist()[-1].stop(wait_other_notice=True)\n cursor.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", consistency_level=\"ANY\")\n assert True", "def test_temporaryFailure(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow() + datetime.timedelta(seconds=90))", "def test_task_add():\n pytest.fail('Not implemented yet.')", "def test_create_task_notebook_or_task_id_error(self):\n task_id = util.MOCK_UUID_1\n experiment_notebook = {\n \"cells\": [],\n \"metadata\": {},\n \"nbformat\": 4,\n \"nbformat_minor\": 4,\n }\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\n \"copyFrom\": task_id,\n \"experimentNotebook\": experiment_notebook,\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"Either provide notebooks or a task to copy from\",\n \"code\": 
\"MissingRequiredNotebookOrTaskId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg", "def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not 
RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. %s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)", "def test_add_write_fail(self):\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_block_extra_batch(self):\n pass", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state", "def create_task(conn):\r\n for i in range(0, len(s), 3):\r\n sql = ''' INSERT OR IGNORE INTO tasks(id,city_name,state)\r\n VALUES(?,?,?) 
'''\r\n task = (s[i],s[i+1],s[i+2])\r\n cur = conn.cursor()\r\n cur.execute(sql,task)\r\n conn.commit()\r\n return \"done\"", "async def test_exectution_limit_once(coresys: CoreSys, loop: asyncio.BaseEventLoop):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n\n @Job(limit=JobExecutionLimit.ONCE, on_condition=JobException)\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n\n test = TestClass(coresys)\n run_task = loop.create_task(test.execute(0.3))\n\n await asyncio.sleep(0.1)\n with pytest.raises(JobException):\n await test.execute(0.1)\n\n await run_task", "def test_instantiating_salesforce_bulk_job_validates_object(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'lead')\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'Floob')", "def _test_retry_after_limited_retry_error(self, exception):\r\n # If we want the batch to succeed, we need to send fewer emails\r\n # than the max retries, so that the max is not triggered.\r\n num_emails = settings.BULK_EMAIL_MAX_RETRIES\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Have every other mail attempt fail due to disconnection.\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=num_emails\r\n )", "def testMarkTaskAsMerging(self):\n redis_client = self._CreateRedisClient()\n\n session = sessions.Session()\n task = tasks.Task(session_identifier=session.identifier)\n\n # Trying to mark a task as merging without finalizing it raises an error.\n with self.assertRaises(IOError):\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)\n\n # Opening and closing a writer for a task should cause the task to be marked\n # as complete.\n storage_writer = writer.RedisStorageWriter(\n storage_type=definitions.STORAGE_TYPE_TASK)\n storage_writer.Open(\n redis_client=redis_client, session_identifier=task.session_identifier,\n task_identifier=task.identifier)\n storage_writer.Close()\n\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def prepare_run(self, **kwargs):\n super().prepare_run(**kwargs)\n with open(\n os.path.join(self.rally_dir, 'rally_jobs.yaml'),\n 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")", "def testSiblingDAGConsistency(self):\n options = Job.Runner.getDefaultOptions(self._createTempDir() + '/jobStore')\n options.clean = 'always'\n options.logLevel = 'debug'\n i = Job.wrapJobFn(diamond)\n with Toil(options) as 
toil:\n try:\n toil.start(i)\n except FailedJobsException:\n # we expect this exception to be raised\n pass\n else:\n self.fail()", "def test_spanner_indexer_insert_batch_no_conflict_does_not_trigger_individual_inserts(\n mock, testing_indexer\n):\n codec = IdCodec()\n\n model1_id = get_id()\n key_results1 = KeyResults()\n model1 = SpannerIndexerModel(\n id=codec.encode(model1_id),\n decoded_id=model1_id,\n string=get_random_string(10),\n organization_id=55555,\n date_added=datetime.now(),\n last_seen=datetime.now(),\n retention_days=55,\n )\n testing_indexer._insert_db_records(UseCaseKey.PERFORMANCE, [model1], key_results1)\n\n # Insert the same record with a different id but the key result would\n # have the id of model1.\n key_results2 = KeyResults()\n model2_id = get_id()\n model2 = SpannerIndexerModel(\n id=codec.encode(model2_id),\n decoded_id=model2_id,\n string=get_random_string(10),\n organization_id=55556,\n date_added=datetime.now(),\n last_seen=datetime.now(),\n retention_days=55,\n )\n testing_indexer._insert_db_records(UseCaseKey.PERFORMANCE, [model2], key_results2)\n assert mock.call_count == 0, \"Insert with collisions should not be called\"" ]
[ "0.66691655", "0.62592685", "0.60708004", "0.6027385", "0.6026459", "0.59589094", "0.5712259", "0.5664953", "0.5624898", "0.54451364", "0.53221446", "0.5284538", "0.52741164", "0.52515495", "0.52346677", "0.52338606", "0.5233629", "0.51976675", "0.51887155", "0.5188266", "0.51741624", "0.5172306", "0.51444685", "0.51443315", "0.5137773", "0.5136921", "0.5136729", "0.51242995", "0.5121705", "0.51184255", "0.5116612", "0.51159096", "0.51146", "0.50948256", "0.50899494", "0.5087033", "0.50781333", "0.5074192", "0.50656205", "0.50617737", "0.5053899", "0.50537646", "0.5044792", "0.50443476", "0.5038105", "0.5038082", "0.5037423", "0.50370187", "0.502535", "0.502331", "0.50175095", "0.50136864", "0.5005146", "0.50011367", "0.49998832", "0.499177", "0.4990891", "0.49755642", "0.49583206", "0.49419272", "0.49407217", "0.4930892", "0.49203518", "0.49144462", "0.4908648", "0.49080813", "0.49050573", "0.49050504", "0.4902849", "0.48991355", "0.489731", "0.4891109", "0.4887738", "0.48870626", "0.48857105", "0.4884675", "0.4883704", "0.48836198", "0.4883183", "0.48825562", "0.48789895", "0.48755985", "0.4872238", "0.48688024", "0.4868536", "0.48640528", "0.4859512", "0.4853893", "0.48512378", "0.48489642", "0.48441884", "0.48395672", "0.48391786", "0.48363787", "0.4834752", "0.48320395", "0.48305118", "0.48219687", "0.48210046", "0.4820691" ]
0.62067395
2
Ensure task batches are split and insertion is retried on TombstonedTaskError.
def test_splits_on_tombstoned(self, queue_add_mock):
    from google.appengine.api import taskqueue
    from sosbeacon.utils import insert_tasks

    queue_add_mock.side_effect = taskqueue.TombstonedTaskError

    tasks = [i for i in xrange(0, 7)]
    added = insert_tasks(tasks, 'default')

    self.assertEqual(added, 0)
    self.assertEqual(queue_add_mock.call_count, 13)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_chunk_size_no_n_splits_provided(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks([]))", "def test_splits_once(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n def side_effect(*args):\n if 2 in args[0]:\n raise taskqueue.TombstonedTaskError('uh oh')\n\n queue_add_mock.side_effect = side_effect\n\n tasks = [i for i in xrange(0, 9)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 8)\n self.assertEqual(queue_add_mock.call_count, 7)", "def test_block_bad_batch(self):\n pass", "def test_block_missing_batch(self):\n pass", "def test_splits_on_taskexists(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError\n\n tasks = [i for i in xrange(0, 10)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 19)", "async def test_delete_batch_invalid(database,valid_data):\n test_valid_insert_batch(database,valid_data)\n N = 10\n batch_id = 1\n for idx in range(N+1,N*2):\n try:\n await database.delete_batch(batch_id=batch_id,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_insert_batch(self, queue_mock):\n from sosbeacon.utils import insert_tasks\n\n tasks = []\n for i in xrange(1, 10):\n tasks.append(object())\n added = insert_tasks(tasks, 'default')\n self.assertEqual(added, 9)", "def test_re_split_cell_raises_error(mock_amg):\n\n # splitting once should work as normal\n mock_amg.cells[4].split()\n\n with pytest.raises(ValueError):\n mock_amg.cells[4].split()", "def test_synchronize_splits_error(self, mocker):\n storage = mocker.Mock(spec=SplitStorage)\n api = mocker.Mock()\n\n def run(x, c):\n raise APIException(\"something broke\")\n run._calls = 0\n api.fetch_splits.side_effect = run\n storage.get_change_number.return_value = -1\n\n from splitio.sync.split import SplitSynchronizer\n split_synchronizer = SplitSynchronizer(api, storage)\n\n with pytest.raises(APIException):\n split_synchronizer.synchronize_splits(1)", "def test_block_batches_order(self):\n pass", "def testTrainSplitError(self):\n with self.assertRaises(Exception):\n mnli.MnliDataset(\n mode='mismatched',\n split=tfds.Split.TRAIN,\n shuffle_buffer_size=20)", "def test_block_missing_batch_dependency(self):\n pass", "def test_rollbacked_transaction_discard_task(self):\n\n @transaction.commit_on_success\n def do_something():\n my_task.delay()\n raise SpecificException\n try:\n do_something()\n except SpecificException:\n self.assertFalse(my_global)\n else:\n self.fail('Exception not raised')", "def split(self, states_and_tasks):\n self._assert_is_batched(states_and_tasks)\n return self._tf_call(self._split, states_and_tasks)", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n 
)\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", "def _bulk_create_with_retry(\n self, table: IndexerTable, new_records: Sequence[BaseIndexer]\n ) -> None:\n retry_count = 0\n sleep_ms = 5\n last_seen_exception: Optional[BaseException] = None\n\n with metrics.timer(\"sentry_metrics.indexer.pg_bulk_create\"):\n # We use `ignore_conflicts=True` here to avoid race conditions where metric indexer\n # records might have be created between when we queried in `bulk_record` and the\n # attempt to create the rows down below.\n while retry_count + 1 < settings.SENTRY_POSTGRES_INDEXER_RETRY_COUNT:\n try:\n table.objects.bulk_create(new_records, ignore_conflicts=True)\n return\n except OperationalError as e:\n sentry_sdk.capture_message(\n f\"retryable deadlock exception encountered; pgcode={e.pgcode}, pgerror={e.pgerror}\"\n )\n if e.pgcode == DEADLOCK_DETECTED:\n metrics.incr(\"sentry_metrics.indexer.pg_bulk_create.deadlocked\")\n retry_count += 1\n sleep(sleep_ms / 1000 * (2**retry_count))\n last_seen_exception = e\n else:\n raise e\n # If we haven't returned after successful bulk create, we should re-raise the last\n # seen exception\n assert isinstance(last_seen_exception, BaseException)\n raise last_seen_exception", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def transaction_failed_before_processing(self):", "def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)", "def test_task_run_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=app.id)\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n task_run = TaskRun(app_id=None, task_id=task.id)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n task_run = TaskRun(app_id=app.id, task_id=None)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_failedUpload(self):\n def _storeObject(content, contentType, metadata={}, created=None,\n objectId=None):\n raise ValueError('blah blah')\n object.__setattr__(self.backendStore, 'storeObject', _storeObject)\n\n nextScheduled = self.pendingUpload.scheduled + timedelta(minutes=5)\n def _nextAttempt():\n return nextScheduled\n object.__setattr__(self.pendingUpload, '_nextAttempt', _nextAttempt)\n\n self.successResultOf(self.pendingUpload.attemptUpload())\n self.assertIdentical(self.store.findUnique(_PendingUpload),\n self.pendingUpload)\n 
self.assertEquals(self.pendingUpload.scheduled,\n nextScheduled)\n errors = self.flushLoggedErrors(ValueError)\n self.assertEquals(len(errors), 1)", "def test_bulk_round_trip_with_backoff(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=250000,\n copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})", "def test_retry_errors_sooner(self):\n config_manager, json_file = self._setup_config_manager(\n 'socorro.unittest.cron.test_crontabber.BarBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBarBackfillJob|1d',\n extra_value_source={\n # crontabber already has a good default for this but by\n # being explict like this we not only show that it can be\n # changed, we also make it clear what the unit test is\n # supposed to do.\n 'crontabber.error_retry_time': '3600' # 1 hour\n }\n )\n\n # first we need to hack-about so that BarBackfillJob fails only\n # once.\n\n class SomeError(Exception):\n pass\n\n def nosy_run(self, date):\n dates_used[self.__class__].append(date)\n if self.__class__ == BarBackfillJob:\n if len(dates_used[self.__class__]) == 1:\n # first time run, simulate trouble\n raise SomeError(\"something went wrong\")\n return originals[self.__class__](self, date)\n\n classes = BarBackfillJob, FooBackfillJob, FooBarBackfillJob\n originals = {}\n dates_used = collections.defaultdict(list)\n for klass in classes:\n originals[klass] = klass.run\n klass.run = nosy_run\n\n try:\n with config_manager.context() as config:\n tab = crontabber.CronTabber(config)\n tab.run_all()\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n # never gets there because dependency fails\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 0)\n\n structure = json.load(open(json_file))\n assert structure['foo-backfill']\n assert not structure['foo-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(days=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['foo-backfill']['next_run']\n )\n\n assert structure['bar-backfill']\n assert structure['bar-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(hours=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['bar-backfill']['next_run']\n )\n\n assert 'foobar-backfill' not in structure\n\n # Now, let the magic happen, we pretend time passes by 2 hours\n # and run all jobs again\n self._wind_clock(json_file, hours=2)\n # this forces in crontabber instance to reload the JSON file\n tab._database = None\n\n # here, we go two hours later\n tab.run_all()\n\n # Here's the magic sauce! 
The FooBarBackfillJob had to wait\n # two hours to run after FooBackfillJob but it should\n # have been given the same date input as when FooBackfillJob\n # ran.\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 1)\n\n # use this formatter so that we don't have to compare\n # datetimes with microseconds\n format = lambda x: x.strftime('%Y%m%d %H:%M %Z')\n self.assertEqual(\n format(dates_used[FooBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n # also check the others\n self.assertEqual(\n format(dates_used[BarBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n\n structure = json.load(open(json_file))\n self.assertTrue(structure['foo-backfill'])\n self.assertTrue(not structure['foo-backfill']['last_error'])\n self.assertTrue(structure['bar-backfill'])\n self.assertTrue(not structure['bar-backfill']['last_error'])\n self.assertTrue(structure['foobar-backfill'])\n self.assertTrue(not structure['foobar-backfill']['last_error'])\n\n finally:\n for klass in classes:\n klass.run = originals[klass]", "def test_task_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=None)\r\n db.session.add(task)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def retry_failed(FailAdmin, request, queryset):\n for task in queryset:\n async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})\n task.delete()", "def test_block_extra_batch(self):\n pass", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n 
self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def acknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, received_responses=0)", "def test_after_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 30)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 40)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 50)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are after each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'inject')", "def test_loopFailure_recovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n 
self.assertEqual(len(jobs), 1)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 1)\n self.assertGreater(jobs[0].notBefore, datetime.datetime.utcnow() + datetime.timedelta(seconds=30))", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "def test_migration_task_rollback(self):\n server, source_host, target_host = self._create_server()\n self._disable_target_host(target_host)\n self._stub_delete_server_during_scheduling(server)\n\n # Now start the cold migration which will fail due to NoValidHost.\n self.api.post_server_action(server['id'], {'migrate': None},\n check_response_status=[202])\n # We cannot monitor the migration from the API since it is deleted\n # when the instance is deleted so just wait for the failed instance\n # action event after the task rollback happens.\n # Note that we get InstanceNotFound rather than NoValidHost because\n # the NoValidHost handler in ComputeTaskManager._cold_migrate calls\n # _set_vm_state_and_notify which raises InstanceNotFound and masks\n # the NoValidHost error.\n self._assert_resize_migrate_action_fail(\n server, instance_actions.MIGRATE, 'InstanceNotFound')\n self._assert_no_allocations(server)", "def test_loopFailure_failedRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n @inlineCallbacks\n def _failedToRun(self, locked=False, delay=None):\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"failedToRun\", _failedToRun)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n 
self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "def test_delete_task_cascade(self):\r\n task = TaskFactory.create()\r\n task_runs = TaskRunFactory.create_batch(3, task=task)\r\n url = '/api/task/%s?api_key=%s' % (task.id, task.app.owner.api_key)\r\n res = self.app.delete(url)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n task_runs = db.session.query(TaskRun)\\\r\n .filter_by(task_id=task.id)\\\r\n .all()\r\n assert len(task_runs) == 0, \"There should not be any task run for task\"", "def test_default_maximum_conflict(self):\n\n self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)\n task = self.create_task(self.context())\n with self.assertRaises(ErrorWhileTesting):\n task.execute()", "def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg", "def test_generate_batches_from_1d_array_with_incomplete_batch(\n array,\n batch_size,\n expected):\n gen = BatchGenerator(array, batch_size=batch_size)\n\n actual = gen.drain()\n\n assert actual == expected", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_batch_passed_through(self, get_task_mock, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_key = Mock()\n group_key.id.return_value = 'SomeGroup'\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups([group_key], event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)\n\n get_task_mock.assert_called_once_with(\n group_key, event_key, message_key, '')", "def test_task_preloading(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task1.get('info'), task1\r\n # 
Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task2 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task2.get('info'), task2\r\n # Check that both tasks are different\r\n assert task1.get('id') != task2.get('id'), \"Tasks should be different\"\r\n ## Save the assigned task\r\n assigned_tasks.append(task1)\r\n assigned_tasks.append(task2)\r\n\r\n # Submit an Answer for the assigned and pre-loaded task\r\n for t in assigned_tasks:\r\n tr = dict(app_id=t['app_id'], task_id=t['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # Get two tasks again\r\n res = self.app.get('api/app/1/newtask')\r\n task3 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task3.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task4 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task4.get('info'), task2\r\n # Check that both tasks are different\r\n assert task3.get('id') != task4.get('id'), \"Tasks should be different\"\r\n assert task1.get('id') != task3.get('id'), \"Tasks should be different\"\r\n assert task2.get('id') != task4.get('id'), \"Tasks should be different\"\r\n # Check that a big offset returns None\r\n res = self.app.get('api/app/1/newtask?offset=11')\r\n assert json.loads(res.data) == {}, res.data", "def migrate_broken_records(broken_output=None, dry_run=False):\n for i, chunk in enumerate(chunker(\n record.marcxml for record in\n db.session.query(InspireProdRecords).filter_by(successful=False))):\n logger.info(\"Processed {} records\".format(i * CHUNK_SIZE))\n chunk_broken_output = None\n if broken_output:\n chunk_broken_output = \"{}-{}\".format(broken_output, i)\n migrate_chunk.delay(chunk, chunk_broken_output, dry_run)", "def test_before_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('set', 'abcd', 0)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 10)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 20)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are before each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = 
decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'abcd')", "def test_instantiating_salesforce_bulk_job_validates_operation(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('floob', 'Lead')", "def test_attemptMigrationFails(self):\n obj, migration, pendingMigration = self._mkMigrationJunk()\n\n def _explode(*a, **kw):\n return fail(ValueError('42'))\n object.__setattr__(self.mockStore, 'storeObject', _explode)\n\n def _eb(f):\n # .store is set to None on deletion\n self.assertNotIdentical(pendingMigration.store, None)\n tb = pendingMigration.lastFailure\n [tb2] = self.flushLoggedErrors(ValueError)\n self.assertIn(u'ValueError: 42', tb)\n self.assertEquals(tb.encode('ascii'), tb2.getTraceback())\n\n d = pendingMigration.attemptMigration()\n return self.assertFailure(d, ValueError).addErrback(_eb)", "def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, received_responses=2)", "def reFragmentMissedTasks(missedTasks, options):\n options.chunk=1+(options.chunk*len(missedTasks)/options.splits)\n temporaryLocation=\"%s%stmp\"%(options.tmpDir,os.sep)\n os.makedirs(temporaryLocation)\n\n fileType = getFileType(options, None)\n\n # create a fileHandle-like object that will read all missed fragments\n inputsToReFragment=[getFragmentPath(options.tmpDir, options.fragBase, i) for i in missedTasks]\n logging.info(\"Restarting fragments: %s\" % missedTasks)\n logging.debug(\"Restarting fragments: %s\" % inputsToReFragment)\n failedRecordStream = fileinput.input(inputsToReFragment)\n\n # create new fragments in temporaryLocation\n newFragNum=fragmentInputStreamBySize(failedRecordStream, temporaryLocation,\n options.chunk, fileType,\n options.fragBase,\n splitOnSize=options.splitOnSize,\n suffix=options.fragSuff)\n\n # remove old fragments\n for i in missedTasks:\n frag = getFragmentPath(options.tmpDir, options.fragBase, i)\n os.remove(frag)\n\n return newFragNum+1", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in 
tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state", "def test_split_cell_throws_error_for_non_int_location():\n\n # create grid with an odd spacing\n amg = mg.MultiGrid([64, 64], 63, WS=127)\n\n # try to split a cell and check it raises an error\n with pytest.raises(ValueError):\n amg.cells[0].split()", "def aknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n cursor = self.prepare(nodes=3)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, acknowledged_by_batchlog=False)", "def test_aborting_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n\n assert created_job.is_open\n\n created_job.abort()\n\n assert not created_job.job\n assert not created_job.job_url\n assert not created_job.pending_batches\n assert not created_job.is_open\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID',\n data=XMLMatcher('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>Aborted</state>\n </jobInfo>\n '''),\n expected_response=200\n )", "def _run(self) -> None:\n while True:\n args: MigrationArgs = self._queue.get(block=True)\n with self._lock:\n if args.collection in self._chunks:\n if args.shard_key not in self._chunks[args.collection]:\n self._split_chunk(args.collection, args.shard_key)\n self._move_chunk(args)", "def test_verify_error(self):\n task = Task(\"uid\", False, False, \"echo\", \"does_not_exist\", \".\", \"A\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "def test_retry_run(self):\n pass", "def test_loopFailure_noRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldNextJob = JobItem.nextjob\n @inlineCallbacks\n def _nextJob(cls, txn, now, minPriority, rowLimit):\n job = yield oldNextJob(txn, now, minPriority, rowLimit)\n work = yield job.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"nextjob\", classmethod(_nextJob))\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n 
@inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "def test_exceptionWhenWorking(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # Error\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=2, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 1, 3: 2})", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def _test_run_with_long_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 1500\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + \"...\")\r\n self.assertTrue('traceback' not in output)", "def _on_task_fail(self, exc):\n LOG.info(\"We could cleanup some resources or log the error\")\n raise exc", "def mark_as_failure(self, task_id, exc, *args, **kwargs):\n django_logger.exception('Celery task failed: %s' % exc, exc_info=exc)\n super().mark_as_failure(task_id, exc, *args, **kwargs)", "def handle_batch(self, transfer_batch, dilution_settings, robot_settings):\n # Raise an error if any transfer requires evaporation, that's currently not implemented\n for transfer in transfer_batch.transfers:\n if transfer.pipette_sample_volume > transfer.target_vol:\n raise UsageError(\"Evaporation needed for '{}' - not implemented yet\".format(\n 
transfer.target_location.artifact.name))\n\n split = [t for t in transfer_batch.transfers if self.needs_split(t, dilution_settings, robot_settings)]\n no_split = [t for t in transfer_batch.transfers if t not in split]\n\n if len(split) > 0:\n return self.split_transfer_batch(split, no_split, dilution_settings, robot_settings)\n else:\n # No split was required\n return TransferBatchCollection(transfer_batch)", "def _test_retry_after_limited_retry_error(self, exception):\r\n # If we want the batch to succeed, we need to send fewer emails\r\n # than the max retries, so that the max is not triggered.\r\n num_emails = settings.BULK_EMAIL_MAX_RETRIES\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Have every other mail attempt fail due to disconnection.\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=num_emails\r\n )", "def _test_undefined_problem(self, task_class):\r\n task_entry = self._create_input_entry()\r\n with self.assertRaises(ItemNotFoundError):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)", "def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)", "def _skip_aware_next_available_task_id(request, batch):\n def _get_skipped_task_ids_for_batch(session, batch_id):\n batch_id = str(batch_id)\n if 'skipped_tasks_in_batch' in session and \\\n batch_id in session['skipped_tasks_in_batch']:\n return session['skipped_tasks_in_batch'][batch_id]\n else:\n return None\n\n available_task_ids = batch.available_task_ids_for(request.user)\n skipped_ids = _get_skipped_task_ids_for_batch(request.session, batch.id)\n\n if skipped_ids:\n task_id = available_task_ids.exclude(id__in=skipped_ids).first()\n if not task_id:\n task_id = available_task_ids.filter(id__in=skipped_ids).first()\n if task_id:\n messages.info(request, 'Only previously skipped Tasks are available')\n\n # Once all remaining Tasks have been marked as skipped, we clear\n # their skipped status. 
If we don't take this step, then a Task\n # cannot be skipped a second time.\n request.session['skipped_tasks_in_batch'][str(batch.id)] = []\n request.session.modified = True\n else:\n task_id = available_task_ids.first()\n\n return task_id", "def validate_batch(self, *args, **kwargs):\n raise NotImplementedError()", "def testTrialErrored2(self):\n stats = self.default_statistics()\n trial_count = stats[str(0)][\"n\"] + stats[str(1)][\"n\"]\n sched, mock_runner = self.schedulerSetup(trial_count)\n trials = sched._state[\"bracket\"].current_trials()\n for t in trials[:-1]:\n mock_runner._launch_trial(t)\n sched.on_trial_result(\n mock_runner, t, result(stats[str(1)][\"r\"], 10))\n\n mock_runner._launch_trial(trials[-1])\n sched.on_trial_error(mock_runner, trials[-1])\n self.assertEqual(len(sched._state[\"bracket\"].current_trials()),\n self.downscale(stats[str(1)][\"n\"], sched))", "def logged_batch_throws_uae_test(self):\n cursor = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n cursor.consistency_level = 'ONE'\n assert_unavailable(cursor.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def _enqueue_task(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.FEED_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/pull_feeds',\n\t\t\t\t\t\teta=self.eta,\n\t\t\t\t\t\tparams={'topic': self.topic}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept (taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to fetch topic = %s',\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "async def test_sending_chunks_with_error(mocker):\n conn = mocker.MagicMock()\n conn.writer = None\n mocker.patch(\"aiosonic._handle_chunk\")\n\n def chunks_data():\n yield b\"foo\"\n\n with pytest.raises(MissingWriterException):\n await aiosonic._send_chunks(conn, chunks_data())\n\n with pytest.raises(ValueError):\n await aiosonic._send_chunks(conn, {})", "def test_exceptionUnassign(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow())", "def test_bulk_round_trip_non_prepared_statements(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=100000,\n copy_from_options={'PREPAREDSTATEMENTS': False})", "def test_integrity_error_bulk_create(self):\n link1, 
link2 = LinkFactory.create_batch(2)\n self.service.get_clicks_for_date.return_value = {\n unicode(link1.pk): '4',\n unicode(link2.pk): '7'\n }\n\n with patch.object(collect_ga_data, 'DataPoint') as MockDataPoint:\n MockDataPoint.objects.bulk_create.side_effect = IntegrityError\n\n with self.assertRaises(CommandError):\n self.command.execute()", "async def test_delete_batch_valid(database, valid_data):\n await test_valid_insert_batch(database,valid_data)\n database = await Database.connect_pool()\n N = 10\n batch_id = 1\n for idx in range(N):\n await database.delete_batch(batch_id=batch_id,user_id=str(idx))\n await database.close_pool()", "def test_invalid_conn(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_cant_allocate_partitioner(self):\n def just_raise(*a, **kw):\n raise ValueError(\"Something went wrong!\")\n self.client.SetPartitioner = just_raise\n partitioner = self.tx_client.SetPartitioner(\"xyzzy\", set([1, 2, 3]))\n self.assertTrue(partitioner.failed)", "def failed_task(self):\n self.report_total_usage()\n #print 'failure killed task %s from sim %s' % (self.name, self.sim.name)\n self.num_faults += 1\n self.retry = True\n self.state = \"ready\"\n if self.using.nodes > 0:\n self.RM.release_allocation(self, self.using.nodes - 1, failed=True)\n self.using.clear()\n self.curr_exec_time = 0\n self.fwk.logEvent(self.sim.name, self.name, \"failed_task\", \"task failed due to node failure\")", "def test_task_add():\n pytest.fail('Not implemented yet.')", "async def test_valid_insert_batch(database,valid_data):\n await database.setup_database(reset=True)\n data = []\n for _id,user_id,embeddings,batch_id in valid_data: #pylint: disable=unused-variable\n await database.insert_user(user_id)\n data.append((_id,user_id,embeddings,1))\n await database.insert_batch(data)\n await database.close_pool()", "async def test_batch_list_with_bad_sort(self):\n self.stream.preset_response(self.status.INVALID_SORT)\n response = await self.get_assert_status('/transactions?sort=bad', 400)\n\n self.assert_has_valid_error(response, 57)", "def test_temporaryFailure(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow() + datetime.timedelta(seconds=90))", "def test_copy_from_with_more_failures_than_max_attempts(self):\n num_records = 1000\n self.prepare(nodes=1)\n\n logger.debug('Running stress')\n stress_table = 
'keyspace1.standard1'\n self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))\n self.run_cqlsh(cmds=\"COPY {} TO '{}'\".format(stress_table, tempfile.name))\n\n self.session.execute(\"TRUNCATE {}\".format(stress_table))\n\n failures = {'failing_batch': {'id': 30, 'failures': 5}}\n os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)\n logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))\n out, err, _ = self.run_cqlsh(cmds=\"COPY {} FROM '{}' WITH CHUNKSIZE='1' AND MAXATTEMPTS='3'\"\n .format(stress_table, tempfile.name))\n logger.debug(out)\n logger.debug(err)\n\n assert 'Failed to process' in err\n num_records_imported = rows_to_list(self.session.execute(\"SELECT COUNT(*) FROM {}\".format(stress_table)))[0][0]\n assert num_records_imported < num_records", "def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)", "def _localHandleFailedRuns(self, failedRuns):\n if len(failedRuns) > 0:\n self.raiseADebug(' Continuing with reduced-size Monte-Carlo sampling.')", "def test_batch(self):\n pass", "def process_all_runs(run_storage_base, options):\n global current\n\n errored_tasks = []\n running_tasks = []\n for name in os.listdir(run_storage_base):\n run_dir = path.join(run_storage_base, name)\n run_id = get_run_id_from_path(run_dir)\n if path.isdir(run_dir) and run_info.is_illumina_run(run_dir):\n try:\n emitted_task = try_autoprocessing(run_dir, options)\n if emitted_task and emitted_task.status == tasks.ERROR:\n errored_tasks.append((emitted_task, None))\n if emitted_task and emitted_task.status == tasks.RUNNING:\n running_tasks.append(emitted_task)\n except Exception as e:\n # Dummy catchall task to signal exceptional failure\n errored_tasks.append((ProcessingTask(run_id,\n 'try_autoprocessing',\n tasks.ERROR), e))\n logging.exception(e)\n if options.verbose:\n logging.error(\"try_autoprocessing failed:\\n %s\", e)\n\n current.task = None\n\n # Stop on error in any task, don't continue with the other runs\n # if emitted_task and emitted_task.status != COMPLETE:\n # break\n\n errorlist = ', '.join(['%s:%s' % (t.task_name, t.run_id)\n for t, e in errored_tasks])\n runninglist = ', '.join(['%s:%s' % (t.task_name, t.run_id)\n for t in running_tasks])\n if options.verbose:\n if running_tasks:\n logging.info(\"%s task(s) are currently running (%s): %s\",\n len(running_tasks), run_storage_base, runninglist)\n else:\n logging.info(\"Successfully completed processing of runs in: %s\",\n run_storage_base)\n\n if errored_tasks:\n if options.verbose:\n logging.error(\"Processing runs in %s completed with failures: %s\",\n run_storage_base, errorlist)\n logging.getLogger('autoprocess_notify').error(\n \"Processing runs in %s completed with failures: %s\",\n run_storage_base,\n errorlist)\n for t, ex in errored_tasks:\n if ex:\n logging.exception(ex)\n else:\n # Throttle notification log if in --quiet mode\n notify_every = timedelta(minutes=options.notify_frequency)\n for t, ex in errored_tasks:\n if t.last_failure_notify_time is None or \\\n (t.last_failure_notify_time +\n notify_every < datetime.now()):\n logging.getLogger('autoprocess_notify').error(\n \"Processing runs in %s completed with failures: %s\",\n 
run_storage_base,\n errorlist)\n t.last_failure_notify_time = datetime.now()\n taskdb = TaskDb(path.join(run_storage_base, t.run_id),\n ProcessingTask, t.run_id)\n taskdb.update(t)\n # t._db.update(t)\n break\n\n return not errored_tasks", "async def _clean_up_batch_of_old_cache_invalidations(\n self, delete_up_to_millisec: int\n ) -> bool:\n\n def _clean_up_batch_of_old_cache_invalidations_txn(\n txn: LoggingTransaction,\n ) -> bool:\n # First get the earliest stream ID\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\"\n )\n row = txn.fetchone()\n if row is None:\n return False\n earliest_stream_id: int = row[0]\n\n # Then find the last stream ID of the range we will delete\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n WHERE stream_id <= ? AND invalidation_ts <= ?\n ORDER BY stream_id DESC\n LIMIT 1\n \"\"\",\n (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec),\n )\n row = txn.fetchone()\n if row is None:\n return False\n cutoff_stream_id: int = row[0]\n\n # Determine whether we are caught up or still catching up\n txn.execute(\n \"\"\"\n SELECT invalidation_ts FROM cache_invalidation_stream_by_instance\n WHERE stream_id > ?\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\",\n (cutoff_stream_id,),\n )\n row = txn.fetchone()\n if row is None:\n in_backlog = False\n else:\n # We are in backlog if the next row could have been deleted\n # if we didn't have such a small batch size\n in_backlog = row[0] <= delete_up_to_millisec\n\n txn.execute(\n \"\"\"\n DELETE FROM cache_invalidation_stream_by_instance\n WHERE ? <= stream_id AND stream_id <= ?\n \"\"\",\n (earliest_stream_id, cutoff_stream_id),\n )\n\n return in_backlog\n\n return await self.db_pool.runInteraction(\n \"clean_up_old_cache_invalidations\",\n _clean_up_batch_of_old_cache_invalidations_txn,\n )", "def aknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n cursor = self.prepare(nodes=3)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, acknowledged_by_batchlog=True)", "def test_error_before_all_processes_complete(self):\n first = \"\"\"file://B <- file://A\n sleep 1\n echo A produces B > B\n error\n \nfile://C <- file://A\n sleep 2\n echo A produces C > C\n \"\"\"\n\n rcode, output = run_tuttle_file(first, nb_workers=2)\n assert rcode == 2\n assert isfile('B')\n assert not isfile('C')\n w = Workflow.load()\n p = w.find_process_that_creates(\"file://C\")\n assert not p.success, \"Process that creates C should be in error in the dump\"", "def bulkCreateTasks(request, *args, **kwargs):\n import settings\n\n # keep track of our own timelimit (20 seconds)\n timelimit = 20000\n timekeeper = Timekeeper(timelimit)\n\n post_dict = request.POST\n\n bulk_create_key = post_dict.get('bulk_create_key')\n if not bulk_create_key:\n return error_handler.logErrorAndReturnOK(\n 'Not all POST data specified in: %s' % post_dict)\n\n bulk_data = bulk_create_model.GCIBulkCreateData.get(bulk_create_key)\n if not bulk_data:\n return error_handler.logErrorAndReturnOK(\n 'No valid data found for key: %s' % bulk_create_key)\n\n # note that we only query for the quota once\n 
org_admin = bulk_data.created_by\n task_quota = org_logic.getRemainingTaskQuota(org_admin.scope)\n\n tasks = bulk_data.tasks\n while len(tasks) > 0:\n try:\n # check if we have time\n timekeeper.ping()\n\n if settings.GCI_TASK_QUOTA_LIMIT_ENABLED and task_quota <= 0:\n return error_handler.logErrorAndReturnOK(\n 'Task quota reached for %s' %(org_admin.scope.name))\n\n # remove the first task\n task_as_string = tasks.pop(0)\n\n loaded_task = simplejson.loads(task_as_string)\n task = {}\n for key, value in loaded_task.iteritems():\n # If we don't do this python will complain about kwargs not being\n # strings when we try to save the new task.\n task[key.encode('UTF-8')] = value\n\n logging.info('Uncleaned task: %s' %task)\n # clean the data\n errors = _cleanTask(task, org_admin)\n\n if errors:\n logging.warning(\n 'Invalid task data uploaded, the following errors occurred: %s'\n %errors)\n bulk_data.errors.append(db.Text(\n 'The task in row %i contains the following errors.\\n %s' \\\n %(bulk_data.tasksRemoved(), '\\n'.join(errors))))\n\n # at-most-once semantics for creating tasks\n bulk_data.put()\n\n if errors:\n # do the next task\n continue\n\n # set other properties\n task['link_id'] = 't%i' % (int(time.time()*100))\n task['scope'] = org_admin.scope\n task['scope_path'] = org_admin.scope_path\n task['program'] = org_admin.program\n task['status'] = 'Unpublished'\n task['created_by'] = org_admin\n task['modified_by'] = org_admin\n\n # create the new task\n logging.info('Creating new task with fields: %s' %task)\n task_logic.updateOrCreateFromFields(task)\n task_quota = task_quota - 1\n except DeadlineExceededError:\n # time to bail out\n pass\n\n if len(tasks) == 0:\n # send out a message\n notifications.sendBulkCreationCompleted(bulk_data)\n bulk_data.delete()\n else:\n # there is still work to be done, do a non 500 response and requeue\n task_params = {\n 'bulk_create_key': bulk_data.key().id_or_name()\n }\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')\n\n # we're done here\n return http.HttpResponse('OK')", "def test_tt_split(self):\n\n bad_arg1 = 5\n bad_arg2 = \"It's a string!\"\n ld = Lambdata(self.df)\n\n ld.tt_split(bad_arg1)\n ld.tt_split(bad_arg2)\n self.assertRaises(ValueError)", "def test_raise_enum_upsert_schema_error(data_row, mdo):\n\n metadata = DataRowMetadata(data_row_id=data_row.uid,\n fields=[\n DataRowMetadataField(schema_id=TEST_SPLIT_ID,\n value=SPLIT_SCHEMA_ID),\n ])\n with pytest.raises(ValueError):\n mdo.bulk_upsert([metadata])", "def logged_batch_throws_uae_test(self):\n session = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n session.consistency_level = 'ONE'\n assert_unavailable(session.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def test_copy_from_with_unacked_batches(self):\n num_records = 1000\n self.prepare(nodes=1)\n\n logger.debug('Running stress')\n stress_table = 'keyspace1.standard1'\n self.node1.stress(['write', 'n={}'.format(num_records), 'no-warmup', '-rate', 'threads=50'])\n\n tempfile = self.get_temp_file()\n logger.debug('Exporting to csv file {} to generate a file'.format(tempfile.name))\n self.run_cqlsh(cmds=\"COPY {} TO '{}'\".format(stress_table, tempfile.name))\n\n self.session.execute(\"TRUNCATE {}\".format(stress_table))\n\n 
failures = {'unsent_batch': {'id': 30}}\n os.environ['CQLSH_COPY_TEST_FAILURES'] = json.dumps(failures)\n logger.debug('Importing from csv file {} with {}'.format(tempfile.name, os.environ['CQLSH_COPY_TEST_FAILURES']))\n out, err, _ = self.run_cqlsh(cmds=\"COPY {} FROM '{}' WITH CHUNKSIZE=1 AND CHILDTIMEOUT=30 AND REQUESTTIMEOUT=15\"\n .format(stress_table, tempfile.name))\n logger.debug(out)\n logger.debug(err)\n\n assert 'No records inserted in 30 seconds, aborting' in err\n num_records_imported = rows_to_list(self.session.execute(\"SELECT COUNT(*) FROM {}\".format(stress_table)))[0][0]\n assert num_records_imported < num_records", "def _test_exception_in_worker_impl(self, pool, num_to_ventilate):\n # exception should be propagated to calling thread\n pool.start(ExceptionGeneratingWorker_5)\n for i in range(num_to_ventilate):\n pool.ventilate(\"Datanum_%d\" % i)\n with self.assertRaises(ValueError):\n pool.get_results()", "def test_unroll_special(self):\r\n self.validate((6, 2, 3, 3), (3, 2, 2, 2), 'valid', unroll_batch=1)", "async def after_task_failure(\n self, key: str, exec_info: ExecutionInfo, error: ErrorInfo\n ) -> None:\n exec_info.task_graph.set_status(key, TaskStatus.FAILED, error=error)\n\n always_eager_coros = []\n\n ready_tasks = exec_info.task_graph.get_ready_tasks()\n\n for task_key in exec_info.task_graph.get_descendants(key):\n task = exec_info.task_graph.get_task(task_key)\n if task.always_eager():\n # If the task is always eager we won't skip it, and if it's ready to\n # run we will actually trigger it here. If this is something like a graph\n # operation it's alright for it to fail, that simply means the graph\n # won't be mutated. It supports partial checkpoints for tasks\n if task_key in ready_tasks:\n coro = self.task_wrapper(task_key, task, exec_info)\n always_eager_coros.append(coro)\n else:\n exec_info.task_graph.set_status(\n task_key, TaskStatus.SKIPPED, skipped_by=key\n )\n\n if exec_info.cancelled_by is None:\n exec_info.cancelled_by = key\n\n if always_eager_coros:\n await asyncio.wait(always_eager_coros)" ]
[ "0.637449", "0.62774134", "0.60077596", "0.5994124", "0.5918459", "0.57265055", "0.56882507", "0.56343436", "0.558805", "0.55751175", "0.55216396", "0.55209655", "0.54577637", "0.53927845", "0.53148663", "0.53103", "0.5290059", "0.5279151", "0.5238429", "0.5230983", "0.5226171", "0.5223245", "0.52101535", "0.517666", "0.5170128", "0.5161223", "0.51274467", "0.5118941", "0.5113437", "0.51015586", "0.5091366", "0.50857395", "0.5083136", "0.50788593", "0.50758666", "0.5063961", "0.505849", "0.5057424", "0.5053329", "0.5051752", "0.5048257", "0.50468796", "0.5042061", "0.5029571", "0.5015237", "0.501311", "0.5007974", "0.499928", "0.4996371", "0.49913636", "0.49794033", "0.4975143", "0.49675405", "0.4966717", "0.4962554", "0.49625018", "0.49598637", "0.4954309", "0.49414584", "0.49408886", "0.493647", "0.4935075", "0.493069", "0.49286467", "0.49247482", "0.49186873", "0.49172637", "0.4913623", "0.49112904", "0.49107993", "0.4899969", "0.4897711", "0.48962957", "0.48947236", "0.48855954", "0.48765317", "0.48747277", "0.48704174", "0.48606429", "0.48545662", "0.4853303", "0.48495233", "0.48454842", "0.48448887", "0.4844451", "0.48433802", "0.48414594", "0.4834612", "0.48086286", "0.4807705", "0.4804758", "0.4804488", "0.4802925", "0.48020223", "0.47966334", "0.47936863", "0.4792653", "0.47897846", "0.47853702", "0.47814128" ]
0.6903556
0
Ensure task batches are split and insertion is retried on TaskAlreadyExistsError.
def test_splits_on_taskexists(self, queue_add_mock):
        from google.appengine.api import taskqueue
        from sosbeacon.utils import insert_tasks

        queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError

        tasks = [i for i in xrange(0, 10)]
        added = insert_tasks(tasks, 'default')

        self.assertEqual(added, 0)
        self.assertEqual(queue_add_mock.call_count, 19)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_splits_on_tombstoned(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TombstonedTaskError\n\n tasks = [i for i in xrange(0, 7)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 13)", "def test_splits_once(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n def side_effect(*args):\n if 2 in args[0]:\n raise taskqueue.TombstonedTaskError('uh oh')\n\n queue_add_mock.side_effect = side_effect\n\n tasks = [i for i in xrange(0, 9)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 8)\n self.assertEqual(queue_add_mock.call_count, 7)", "def test_block_missing_batch(self):\n pass", "def _bulk_create_with_retry(\n self, table: IndexerTable, new_records: Sequence[BaseIndexer]\n ) -> None:\n retry_count = 0\n sleep_ms = 5\n last_seen_exception: Optional[BaseException] = None\n\n with metrics.timer(\"sentry_metrics.indexer.pg_bulk_create\"):\n # We use `ignore_conflicts=True` here to avoid race conditions where metric indexer\n # records might have be created between when we queried in `bulk_record` and the\n # attempt to create the rows down below.\n while retry_count + 1 < settings.SENTRY_POSTGRES_INDEXER_RETRY_COUNT:\n try:\n table.objects.bulk_create(new_records, ignore_conflicts=True)\n return\n except OperationalError as e:\n sentry_sdk.capture_message(\n f\"retryable deadlock exception encountered; pgcode={e.pgcode}, pgerror={e.pgerror}\"\n )\n if e.pgcode == DEADLOCK_DETECTED:\n metrics.incr(\"sentry_metrics.indexer.pg_bulk_create.deadlocked\")\n retry_count += 1\n sleep(sleep_ms / 1000 * (2**retry_count))\n last_seen_exception = e\n else:\n raise e\n # If we haven't returned after successful bulk create, we should re-raise the last\n # seen exception\n assert isinstance(last_seen_exception, BaseException)\n raise last_seen_exception", "def test_no_chunk_size_no_n_splits_provided(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks([]))", "async def test_delete_batch_invalid(database,valid_data):\n test_valid_insert_batch(database,valid_data)\n N = 10\n batch_id = 1\n for idx in range(N+1,N*2):\n try:\n await database.delete_batch(batch_id=batch_id,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_block_bad_batch(self):\n pass", "def test_block_missing_batch_dependency(self):\n pass", "def test_insert_batch(self, queue_mock):\n from sosbeacon.utils import insert_tasks\n\n tasks = []\n for i in xrange(1, 10):\n tasks.append(object())\n added = insert_tasks(tasks, 'default')\n self.assertEqual(added, 9)", "async def create_checkpoint_if_not_exists_async(self, partition_id):", "def test_integrity_error_bulk_create(self):\n link1, link2 = LinkFactory.create_batch(2)\n self.service.get_clicks_for_date.return_value = {\n unicode(link1.pk): '4',\n unicode(link2.pk): '7'\n }\n\n with patch.object(collect_ga_data, 'DataPoint') as MockDataPoint:\n MockDataPoint.objects.bulk_create.side_effect = IntegrityError\n\n with self.assertRaises(CommandError):\n self.command.execute()", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, 
collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def testTrainSplitError(self):\n with self.assertRaises(Exception):\n mnli.MnliDataset(\n mode='mismatched',\n split=tfds.Split.TRAIN,\n shuffle_buffer_size=20)", "def test_block_batches_order(self):\n pass", "def test_instantiating_salesforce_bulk_job_validates_operation(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('floob', 'Lead')", "def test_invalid_follow_task_true(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=None,\n follow_task_ids_if_false=\"branch_2\",\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_bulk_group_errors(self):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_actg_missing_col)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n res = self.client.post(self.ag_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_bulk_round_trip_with_backoff(self):\n self._test_bulk_round_trip(nodes=3, partitioner=\"murmur3\", num_operations=250000,\n copy_from_options={'MAXINFLIGHTMESSAGES': 64, 'MAXPENDINGCHUNKS': 1})", "def test_task_preloading(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task1.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task2 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task2.get('info'), task2\r\n # Check that both tasks are different\r\n assert task1.get('id') != task2.get('id'), \"Tasks should be different\"\r\n ## Save the assigned task\r\n assigned_tasks.append(task1)\r\n assigned_tasks.append(task2)\r\n\r\n # Submit an Answer for the assigned and pre-loaded task\r\n for t in assigned_tasks:\r\n tr = dict(app_id=t['app_id'], task_id=t['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # Get two tasks again\r\n res = self.app.get('api/app/1/newtask')\r\n task3 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task3.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task4 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task4.get('info'), task2\r\n # Check that both tasks are different\r\n assert task3.get('id') != task4.get('id'), \"Tasks should be different\"\r\n assert task1.get('id') != task3.get('id'), \"Tasks should be different\"\r\n assert task2.get('id') != task4.get('id'), \"Tasks should be different\"\r\n # Check that a big offset returns None\r\n res = self.app.get('api/app/1/newtask?offset=11')\r\n assert json.loads(res.data) == {}, res.data", "def test_task_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n 
locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=None)\r\n db.session.add(task)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def acknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, received_responses=0)", "def test_invalid_follow_task_false(self):\n op = BranchSQLOperator(\n task_id=\"make_choice\",\n conn_id=\"invalid_connection\",\n sql=\"SELECT count(1) FROM INFORMATION_SCHEMA.TABLES\",\n follow_task_ids_if_true=\"branch_1\",\n follow_task_ids_if_false=None,\n dag=self.dag,\n )\n\n with pytest.raises(AirflowException):\n op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)", "def test_default_maximum_conflict(self):\n\n self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)\n task = self.create_task(self.context())\n with self.assertRaises(ErrorWhileTesting):\n task.execute()", "def test_task_run_errors(self):\r\n user = User(\r\n email_addr=\"john.doe@example.com\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=app.id)\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n task_run = TaskRun(app_id=None, task_id=task.id)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n task_run = TaskRun(app_id=app.id, task_id=None)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()", "def test_rollbacked_transaction_discard_task(self):\n\n @transaction.commit_on_success\n def do_something():\n my_task.delay()\n raise SpecificException\n try:\n do_something()\n except SpecificException:\n self.assertFalse(my_global)\n else:\n self.fail('Exception not raised')", "def retry_failed(FailAdmin, request, queryset):\n for task in queryset:\n async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})\n task.delete()", "def test_synchronize_splits_error(self, mocker):\n storage = mocker.Mock(spec=SplitStorage)\n api = mocker.Mock()\n\n def run(x, c):\n raise APIException(\"something broke\")\n run._calls = 0\n api.fetch_splits.side_effect = run\n storage.get_change_number.return_value = -1\n\n from splitio.sync.split import SplitSynchronizer\n split_synchronizer = SplitSynchronizer(api, storage)\n\n with pytest.raises(APIException):\n split_synchronizer.synchronize_splits(1)", "def acknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n session = self.prepare(nodes=3, compression=False)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n 
self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(session, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, received_responses=2)", "def test_loopFailure_recovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 1)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 1)\n self.assertGreater(jobs[0].notBefore, datetime.datetime.utcnow() + datetime.timedelta(seconds=30))", "def _skip_aware_next_available_task_id(request, batch):\n def _get_skipped_task_ids_for_batch(session, batch_id):\n batch_id = str(batch_id)\n if 'skipped_tasks_in_batch' in session and \\\n batch_id in session['skipped_tasks_in_batch']:\n return session['skipped_tasks_in_batch'][batch_id]\n else:\n return None\n\n available_task_ids = batch.available_task_ids_for(request.user)\n skipped_ids = _get_skipped_task_ids_for_batch(request.session, batch.id)\n\n if skipped_ids:\n task_id = available_task_ids.exclude(id__in=skipped_ids).first()\n if not task_id:\n task_id = available_task_ids.filter(id__in=skipped_ids).first()\n if task_id:\n messages.info(request, 'Only previously skipped Tasks are available')\n\n # Once all remaining Tasks have been marked as skipped, we clear\n # their skipped status. 
If we don't take this step, then a Task\n # cannot be skipped a second time.\n request.session['skipped_tasks_in_batch'][str(batch.id)] = []\n request.session.modified = True\n else:\n task_id = available_task_ids.first()\n\n return task_id", "def transaction_failed_before_processing(self):", "def bulkCreateTasks(request, *args, **kwargs):\n import settings\n\n # keep track of our own timelimit (20 seconds)\n timelimit = 20000\n timekeeper = Timekeeper(timelimit)\n\n post_dict = request.POST\n\n bulk_create_key = post_dict.get('bulk_create_key')\n if not bulk_create_key:\n return error_handler.logErrorAndReturnOK(\n 'Not all POST data specified in: %s' % post_dict)\n\n bulk_data = bulk_create_model.GCIBulkCreateData.get(bulk_create_key)\n if not bulk_data:\n return error_handler.logErrorAndReturnOK(\n 'No valid data found for key: %s' % bulk_create_key)\n\n # note that we only query for the quota once\n org_admin = bulk_data.created_by\n task_quota = org_logic.getRemainingTaskQuota(org_admin.scope)\n\n tasks = bulk_data.tasks\n while len(tasks) > 0:\n try:\n # check if we have time\n timekeeper.ping()\n\n if settings.GCI_TASK_QUOTA_LIMIT_ENABLED and task_quota <= 0:\n return error_handler.logErrorAndReturnOK(\n 'Task quota reached for %s' %(org_admin.scope.name))\n\n # remove the first task\n task_as_string = tasks.pop(0)\n\n loaded_task = simplejson.loads(task_as_string)\n task = {}\n for key, value in loaded_task.iteritems():\n # If we don't do this python will complain about kwargs not being\n # strings when we try to save the new task.\n task[key.encode('UTF-8')] = value\n\n logging.info('Uncleaned task: %s' %task)\n # clean the data\n errors = _cleanTask(task, org_admin)\n\n if errors:\n logging.warning(\n 'Invalid task data uploaded, the following errors occurred: %s'\n %errors)\n bulk_data.errors.append(db.Text(\n 'The task in row %i contains the following errors.\\n %s' \\\n %(bulk_data.tasksRemoved(), '\\n'.join(errors))))\n\n # at-most-once semantics for creating tasks\n bulk_data.put()\n\n if errors:\n # do the next task\n continue\n\n # set other properties\n task['link_id'] = 't%i' % (int(time.time()*100))\n task['scope'] = org_admin.scope\n task['scope_path'] = org_admin.scope_path\n task['program'] = org_admin.program\n task['status'] = 'Unpublished'\n task['created_by'] = org_admin\n task['modified_by'] = org_admin\n\n # create the new task\n logging.info('Creating new task with fields: %s' %task)\n task_logic.updateOrCreateFromFields(task)\n task_quota = task_quota - 1\n except DeadlineExceededError:\n # time to bail out\n pass\n\n if len(tasks) == 0:\n # send out a message\n notifications.sendBulkCreationCompleted(bulk_data)\n bulk_data.delete()\n else:\n # there is still work to be done, do a non 500 response and requeue\n task_params = {\n 'bulk_create_key': bulk_data.key().id_or_name()\n }\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')\n\n # we're done here\n return http.HttpResponse('OK')", "async def test_delete_batch_valid(database, valid_data):\n await test_valid_insert_batch(database,valid_data)\n database = await Database.connect_pool()\n N = 10\n batch_id = 1\n for idx in range(N):\n await database.delete_batch(batch_id=batch_id,user_id=str(idx))\n await database.close_pool()", "def validate_batch(self, *args, **kwargs):\n raise NotImplementedError()", "def logged_batch_throws_uae_test(self):\n cursor = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in 
self.cluster.nodelist()[1:] ]\n cursor.consistency_level = 'ONE'\n assert_unavailable(cursor.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def test_re_split_cell_raises_error(mock_amg):\n\n # splitting once should work as normal\n mock_amg.cells[4].split()\n\n with pytest.raises(ValueError):\n mock_amg.cells[4].split()", "async def _clean_up_batch_of_old_cache_invalidations(\n self, delete_up_to_millisec: int\n ) -> bool:\n\n def _clean_up_batch_of_old_cache_invalidations_txn(\n txn: LoggingTransaction,\n ) -> bool:\n # First get the earliest stream ID\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\"\n )\n row = txn.fetchone()\n if row is None:\n return False\n earliest_stream_id: int = row[0]\n\n # Then find the last stream ID of the range we will delete\n txn.execute(\n \"\"\"\n SELECT stream_id FROM cache_invalidation_stream_by_instance\n WHERE stream_id <= ? AND invalidation_ts <= ?\n ORDER BY stream_id DESC\n LIMIT 1\n \"\"\",\n (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec),\n )\n row = txn.fetchone()\n if row is None:\n return False\n cutoff_stream_id: int = row[0]\n\n # Determine whether we are caught up or still catching up\n txn.execute(\n \"\"\"\n SELECT invalidation_ts FROM cache_invalidation_stream_by_instance\n WHERE stream_id > ?\n ORDER BY stream_id ASC\n LIMIT 1\n \"\"\",\n (cutoff_stream_id,),\n )\n row = txn.fetchone()\n if row is None:\n in_backlog = False\n else:\n # We are in backlog if the next row could have been deleted\n # if we didn't have such a small batch size\n in_backlog = row[0] <= delete_up_to_millisec\n\n txn.execute(\n \"\"\"\n DELETE FROM cache_invalidation_stream_by_instance\n WHERE ? 
<= stream_id AND stream_id <= ?\n \"\"\",\n (earliest_stream_id, cutoff_stream_id),\n )\n\n return in_backlog\n\n return await self.db_pool.runInteraction(\n \"clean_up_old_cache_invalidations\",\n _clean_up_batch_of_old_cache_invalidations_txn,\n )", "def test_fail_repeated_buckelist_item(self):\r\n user = User.query.filter_by(email=\"test@test.com\").first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n response = self.add_bucketlist_item(\"test@test.com\", \"test\", bucketlist.id, \"test item\")\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '409 CONFLICT')\r\n self.assertEqual(result['message'], 'Bucketlist Item Exists')\r\n new_item_no = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id).count()\r\n self.assertEqual(item_no, new_item_no)", "def test_loopFailure_failedRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldAssign = JobItem.assign\n @inlineCallbacks\n def _assign(self, when, overdue):\n work = yield self.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n yield oldAssign(self, when, overdue)\n\n self.patch(JobItem, \"assign\", _assign)\n\n @inlineCallbacks\n def _failedToRun(self, locked=False, delay=None):\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"failedToRun\", _failedToRun)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "def test_import_error_record_is_updated_not_deleted_and_recreated(self, tmpdir):\n filename_to_parse = os.path.join(tmpdir, TEMP_DAG_FILENAME)\n\n # Generate original import error\n with open(filename_to_parse, \"w\") as file_to_parse:\n file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)\n session = settings.Session()\n self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)\n\n import_error_1 = (\n session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()\n )\n\n # process the file multiple times\n for _ in range(10):\n self._process_file(filename_to_parse, dag_directory=tmpdir, session=session)\n\n import_error_2 = (\n session.query(errors.ImportError).filter(errors.ImportError.filename == filename_to_parse).one()\n )\n\n # assert that the ID of the import error did not change\n assert import_error_1.id == import_error_2.id", "def test_retry_errors_sooner(self):\n config_manager, json_file = 
self._setup_config_manager(\n 'socorro.unittest.cron.test_crontabber.BarBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBackfillJob|1d\\n'\n 'socorro.unittest.cron.test_crontabber.FooBarBackfillJob|1d',\n extra_value_source={\n # crontabber already has a good default for this but by\n # being explict like this we not only show that it can be\n # changed, we also make it clear what the unit test is\n # supposed to do.\n 'crontabber.error_retry_time': '3600' # 1 hour\n }\n )\n\n # first we need to hack-about so that BarBackfillJob fails only\n # once.\n\n class SomeError(Exception):\n pass\n\n def nosy_run(self, date):\n dates_used[self.__class__].append(date)\n if self.__class__ == BarBackfillJob:\n if len(dates_used[self.__class__]) == 1:\n # first time run, simulate trouble\n raise SomeError(\"something went wrong\")\n return originals[self.__class__](self, date)\n\n classes = BarBackfillJob, FooBackfillJob, FooBarBackfillJob\n originals = {}\n dates_used = collections.defaultdict(list)\n for klass in classes:\n originals[klass] = klass.run\n klass.run = nosy_run\n\n try:\n with config_manager.context() as config:\n tab = crontabber.CronTabber(config)\n tab.run_all()\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n # never gets there because dependency fails\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 0)\n\n structure = json.load(open(json_file))\n assert structure['foo-backfill']\n assert not structure['foo-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(days=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['foo-backfill']['next_run']\n )\n\n assert structure['bar-backfill']\n assert structure['bar-backfill']['last_error']\n next_date = utc_now() + datetime.timedelta(hours=1)\n assert (\n next_date.strftime('%Y-%m-%d %H:%M') in\n structure['bar-backfill']['next_run']\n )\n\n assert 'foobar-backfill' not in structure\n\n # Now, let the magic happen, we pretend time passes by 2 hours\n # and run all jobs again\n self._wind_clock(json_file, hours=2)\n # this forces in crontabber instance to reload the JSON file\n tab._database = None\n\n # here, we go two hours later\n tab.run_all()\n\n # Here's the magic sauce! 
The FooBarBackfillJob had to wait\n # two hours to run after FooBackfillJob but it should\n # have been given the same date input as when FooBackfillJob\n # ran.\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBackfillJob]), 1)\n self.assertEqual(len(dates_used[FooBarBackfillJob]), 1)\n\n # use this formatter so that we don't have to compare\n # datetimes with microseconds\n format = lambda x: x.strftime('%Y%m%d %H:%M %Z')\n self.assertEqual(\n format(dates_used[FooBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n # also check the others\n self.assertEqual(\n format(dates_used[BarBackfillJob][0]),\n format(dates_used[FooBarBackfillJob][0])\n )\n\n structure = json.load(open(json_file))\n self.assertTrue(structure['foo-backfill'])\n self.assertTrue(not structure['foo-backfill']['last_error'])\n self.assertTrue(structure['bar-backfill'])\n self.assertTrue(not structure['bar-backfill']['last_error'])\n self.assertTrue(structure['foobar-backfill'])\n self.assertTrue(not structure['foobar-backfill']['last_error'])\n\n finally:\n for klass in classes:\n klass.run = originals[klass]", "def test_delete_task_cascade(self):\r\n task = TaskFactory.create()\r\n task_runs = TaskRunFactory.create_batch(3, task=task)\r\n url = '/api/task/%s?api_key=%s' % (task.id, task.app.owner.api_key)\r\n res = self.app.delete(url)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n task_runs = db.session.query(TaskRun)\\\r\n .filter_by(task_id=task.id)\\\r\n .all()\r\n assert len(task_runs) == 0, \"There should not be any task run for task\"", "def test_loopFailure_noRecovery(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n oldNextJob = JobItem.nextjob\n @inlineCallbacks\n def _nextJob(cls, txn, now, minPriority, rowLimit):\n job = yield oldNextJob(txn, now, minPriority, rowLimit)\n work = yield job.workItem()\n if work.a == -2:\n raise ValueError(\"oops\")\n\n self.patch(JobItem, \"nextjob\", classmethod(_nextJob))\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Failing\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60, 5)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertEqual(len(jobs), 2)\n self.assertEqual(jobs[0].assigned, None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertEqual(jobs[0].failed, 0)\n self.assertEqual(jobs[0].notBefore, fakeNow - datetime.timedelta(20 * 60))\n self.assertEqual(jobs[1].assigned, None)\n self.assertEqual(jobs[1].isAssigned, 0)\n self.assertEqual(jobs[1].failed, 0)\n self.assertEqual(jobs[1].notBefore, fakeNow - datetime.timedelta(20 * 60, 5))", "async def test_delete_invalid(database,valid_data):\n test_valid_insert(database,valid_data)\n N = 10\n for idx in range(N+1,N*2):\n try:\n await database.delete(_id=idx,user_id=idx)\n assert False\n except:\n assert True\n await database.close_pool()", "def _test_retry_after_unlimited_retry_error(self, exception):\r\n num_emails = 8\r\n # We also send email to the instructor:\r\n 
self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n # Note that because celery in eager mode will call retries synchronously,\r\n # each retry will increase the stack depth. It turns out that there is a\r\n # maximum depth at which a RuntimeError is raised (\"maximum recursion depth\r\n # exceeded\"). The maximum recursion depth is 90, so\r\n # num_emails * expected_retries < 90.\r\n expected_retries = 10\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Cycle through N throttling errors followed by a success.\r\n get_conn.return_value.send_messages.side_effect = cycle(\r\n chain(repeat(exception, expected_retries), [None])\r\n )\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_nomax=(expected_retries * num_emails)\r\n )", "async def test_invalid_insert_user_duplicate_key(database):\n await database.setup_database(reset=True)\n await database.insert_user(\"\")\n for user_id in zip([\"1\" for _ in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_sync_object_task_retries_on_error(monkeypatch, es):\n sync_object_mock = Mock(side_effect=[Exception, None])\n monkeypatch.setattr('datahub.search.sync_object.sync_object', sync_object_mock)\n\n sync_object_task.apply(args=(SimpleModelSearchApp.name, str(uuid4())))\n\n assert sync_object_mock.call_count == 2", "def _test_undefined_problem(self, task_class):\r\n task_entry = self._create_input_entry()\r\n with self.assertRaises(ItemNotFoundError):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)", "async def test_invalid_insert_duplicate_key(database, duplicate_data):\n await database.setup_database(reset=True)\n await database.insert_user(\"1\")\n\n await database.insert(id=1,user_id=\"1\",embeddings=[1,2])\n for id,user_id,embeddings,batch_id in duplicate_data:\n try:\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def logged_batch_throws_uae_test(self):\n session = self.prepare(nodes=3)\n [ node.stop(wait_other_notice=True) for node in self.cluster.nodelist()[1:] ]\n session.consistency_level = 'ONE'\n assert_unavailable(session.execute, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")", "def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)", "def aknowledged_by_batchlog_not_set_when_batchlog_write_fails_test(self):\n cursor = self.prepare(nodes=3)\n # kill 2 of the 3 nodes (all the batchlog write candidates).\n [ node.stop(gently=False) for node in self.cluster.nodelist()[1:] ]\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.ONE, 
acknowledged_by_batchlog=False)", "def test_raise_enum_upsert_schema_error(data_row, mdo):\n\n metadata = DataRowMetadata(data_row_id=data_row.uid,\n fields=[\n DataRowMetadataField(schema_id=TEST_SPLIT_ID,\n value=SPLIT_SCHEMA_ID),\n ])\n with pytest.raises(ValueError):\n mdo.bulk_upsert([metadata])", "def test_failedUpload(self):\n def _storeObject(content, contentType, metadata={}, created=None,\n objectId=None):\n raise ValueError('blah blah')\n object.__setattr__(self.backendStore, 'storeObject', _storeObject)\n\n nextScheduled = self.pendingUpload.scheduled + timedelta(minutes=5)\n def _nextAttempt():\n return nextScheduled\n object.__setattr__(self.pendingUpload, '_nextAttempt', _nextAttempt)\n\n self.successResultOf(self.pendingUpload.attemptUpload())\n self.assertIdentical(self.store.findUnique(_PendingUpload),\n self.pendingUpload)\n self.assertEquals(self.pendingUpload.scheduled,\n nextScheduled)\n errors = self.flushLoggedErrors(ValueError)\n self.assertEquals(len(errors), 1)", "def test_after_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 30)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 40)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 50)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are after each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'inject')", "async def test_valid_insert_batch(database,valid_data):\n await database.setup_database(reset=True)\n data = []\n for _id,user_id,embeddings,batch_id in valid_data: #pylint: disable=unused-variable\n await database.insert_user(user_id)\n data.append((_id,user_id,embeddings,1))\n await database.insert_batch(data)\n await database.close_pool()", "def _enqueue_task(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.FEED_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/pull_feeds',\n\t\t\t\t\t\teta=self.eta,\n\t\t\t\t\t\tparams={'topic': self.topic}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept 
(taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to fetch topic = %s',\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "def test_before_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('set', 'abcd', 0)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 10)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 20)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are before each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'abcd')", "def test_bulk_round_trip_with_timeouts(self):\n self._test_bulk_round_trip(nodes=1, partitioner=\"murmur3\", num_operations=100000,\n configuration_options={'range_request_timeout_in_ms': '200',\n 'write_request_timeout_in_ms': '100'},\n copy_from_options={'MAXINSERTERRORS': -1},\n skip_count_checks=True)", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "def test_check_chunk_n(self):\n st, frontend_setup = self.get_st_and_fill_frontends()\n\n sf = st.storage[0]\n st_new = st.new_context()\n st_new.storage = [sf]\n key = st_new.key_for(self.run_id, self.target)\n backend, backend_key = sf.find(key, **st_new._find_options)\n prefix = strax.storage.files.dirname_to_prefix(backend_key)\n md = st_new.get_metadata(self.run_id, self.target)\n md['chunks'][0]['n'] += 1\n md_path = os.path.join(backend_key, f'{prefix}-metadata.json')\n with open(md_path, \"w\") as file:\n json.dump(md, file, indent=4)\n\n with self.assertRaises(strax.DataCorrupted):\n assert st_new.is_stored(self.run_id, self.target)\n st_new.get_array(self.run_id, self.target)", "def test_anonymous_user_create_repeated_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n task = TaskFactory.create()\r\n taskrun1 = AnonymousTaskRunFactory.create(task=task)\r\n 
taskrun2 = AnonymousTaskRunFactory.build(task=task)\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').create,\r\n taskrun2)\r\n\r\n # But the user can still create taskruns for different tasks\r\n task2 = TaskFactory.create(app=task.app)\r\n taskrun3 = AnonymousTaskRunFactory.build(task=task2)\r\n assert_not_raises(Exception,\r\n getattr(require, 'taskrun').create,\r\n taskrun3)", "def reFragmentMissedTasks(missedTasks, options):\n options.chunk=1+(options.chunk*len(missedTasks)/options.splits)\n temporaryLocation=\"%s%stmp\"%(options.tmpDir,os.sep)\n os.makedirs(temporaryLocation)\n\n fileType = getFileType(options, None)\n\n # create a fileHandle-like object that will read all missed fragments\n inputsToReFragment=[getFragmentPath(options.tmpDir, options.fragBase, i) for i in missedTasks]\n logging.info(\"Restarting fragments: %s\" % missedTasks)\n logging.debug(\"Restarting fragments: %s\" % inputsToReFragment)\n failedRecordStream = fileinput.input(inputsToReFragment)\n\n # create new fragments in temporaryLocation\n newFragNum=fragmentInputStreamBySize(failedRecordStream, temporaryLocation,\n options.chunk, fileType,\n options.fragBase,\n splitOnSize=options.splitOnSize,\n suffix=options.fragSuff)\n\n # remove old fragments\n for i in missedTasks:\n frag = getFragmentPath(options.tmpDir, options.fragBase, i)\n os.remove(frag)\n\n return newFragNum+1", "def test_dupe_urls(self):\n with self.assertRaises(IntegrityError) as context:\n Bookmark.objects.create(name=\"Bookmark 2\",\n url=\"http://www.example.com\")\n self.assertTrue('UNIQUE constraint failed' in context.exception)", "def aknowledged_by_batchlog_set_when_batchlog_write_succeeds_test(self):\n cursor = self.prepare(nodes=3)\n # kill one of the nodes so that batchlog will be written, but the write will fail.\n self.cluster.nodelist()[-1].stop(gently=False)\n self.assert_timedout(cursor, \"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", ConsistencyLevel.THREE, acknowledged_by_batchlog=True)", "def test_retry_run(self):\n pass", "def test_batch_passed_through(self, get_task_mock, insert_tasks_mock):\n from sosbeacon.event.message import broadcast_to_groups\n\n group_key = Mock()\n group_key.id.return_value = 'SomeGroup'\n\n event_key = Mock()\n\n message_key = Mock()\n message_key.urlsafe.return_value = 'abc'\n\n broadcast_to_groups([group_key], event_key, message_key, '')\n\n self.assertEqual(insert_tasks_mock.call_count, 1)\n\n get_task_mock.assert_called_once_with(\n group_key, event_key, message_key, '')", "async def test_invalid_insert_no_user(database, valid_data):\n await database.setup_database(reset=True)\n for id,user_id,embeddings,batch_id in valid_data:\n try:\n await database.insert(id=id,\n user_id=user_id,\n embeddings=embeddings,\n batch_id=batch_id)\n assert False\n except(NotFoundError, DuplicateKeyError):\n assert True\n await database.close_pool()", "def test_exceptionWhenWorking(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=1, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n 
)\n\n # Error\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n\n # OK\n yield DummyWorkItem.makeJob(\n txn, a=2, b=0, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 1, 3: 2})", "def test_exceptionUnassign(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-1, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow())", "def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", "def test_add(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n new_task.key = self.task_storage.add(new_task)\n\n self.assertNotEqual(self.my_task.key, new_task.key)\n self.task_storage.delete(new_task.key)", "def test_cant_allocate_partitioner(self):\n def just_raise(*a, **kw):\n raise ValueError(\"Something went wrong!\")\n self.client.SetPartitioner = just_raise\n partitioner = self.tx_client.SetPartitioner(\"xyzzy\", set([1, 2, 3]))\n self.assertTrue(partitioner.failed)", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def test_aborting_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n\n assert created_job.is_open\n\n created_job.abort()\n\n assert not created_job.job\n assert not created_job.job_url\n assert not created_job.pending_batches\n assert not created_job.is_open\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID',\n data=XMLMatcher('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>Aborted</state>\n </jobInfo>\n '''),\n expected_response=200\n )", "def 
test_delete_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)", "def validate_batch(self, *arg, **kwargs):\n pass", "def _createOwnPartition(self, databaseCursor, uniqueItems):\n self.logger.debug(\"%s - in createOwnPartition for %s\",threading.currentThread().getName(),self.name)\n for x in uniqueItems:\n #self.logger.debug(\"DEBUG - item value is %s\",x)\n partitionCreationParameters = self.partitionCreationParameters(x)\n partitionName = self.partitionNameTemplate % partitionCreationParameters[\"partitionName\"]\n if partitionWasCreated(partitionName):\n #self.logger.debug(\"DEBUG - skipping creation of %s\",partitionName)\n continue\n partitionCreationSql = self.partitionCreationSqlTemplate % partitionCreationParameters\n #self.logger.debug(\"%s - Sql for %s is %s\",threading.currentThread().getName(),self.name,partitionCreationSql)\n aPartition = Table(name=partitionName, logger=self.logger, creationSql=partitionCreationSql)\n self.logger.debug(\"%s - savepoint createPartitions_%s\",threading.currentThread().getName(), partitionName)\n databaseCursor.execute(\"savepoint createPartitions_%s\" % partitionName)\n try:\n self.logger.debug(\"%s - creating %s\", threading.currentThread().getName(), partitionName)\n aPartition._createSelf(databaseCursor)\n markPartitionCreated(partitionName)\n self.logger.debug(\"%s - successful - releasing savepoint\", threading.currentThread().getName())\n databaseCursor.execute(\"release savepoint createPartitions_%s\" % partitionName)\n except pg.ProgrammingError, x:\n self.logger.debug(\"%s -- Rolling back and releasing savepoint: Creating %s failed in createPartitions: %s\", threading.currentThread().getName(), partitionName, str(x).strip())\n databaseCursor.execute(\"rollback to createPartitions_%s; release savepoint createPartitions_%s;\" % (partitionName, partitionName))\n databaseCursor.connection.commit()", "def logged_batch_doesnt_throw_uae_test(self):\n cursor = self.prepare(nodes=3)\n self.cluster.nodelist()[-1].stop(wait_other_notice=True)\n cursor.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", consistency_level=\"ANY\")\n assert True", "def test_temporaryFailure(self):\n dbpool, _ignore_qpool, clock, _ignore_performerChosen = self._setupPools()\n fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)\n\n # Let's create a couple of work items directly, not via the enqueue\n # method, so that they exist but nobody will try to immediately execute\n # them.\n\n @transactionally(dbpool.pool.connection)\n @inlineCallbacks\n def setup(txn):\n # Next, create failing work that's actually far enough into the past to run.\n yield DummyWorkItem.makeJob(\n txn, a=-2, b=1, notBefore=fakeNow - datetime.timedelta(20 * 60)\n )\n yield setup\n clock.advance(20 - 12)\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return JobItem.all(txn)\n\n jobs = yield check\n self.assertTrue(len(jobs) == 1)\n self.assertTrue(jobs[0].assigned is None)\n self.assertEqual(jobs[0].isAssigned, 0)\n self.assertTrue(jobs[0].failed == 1)\n self.assertTrue(jobs[0].notBefore > datetime.datetime.utcnow() + datetime.timedelta(seconds=90))", "def test_task_add():\n pytest.fail('Not implemented yet.')", "def test_create_task_notebook_or_task_id_error(self):\n task_id = util.MOCK_UUID_1\n 
experiment_notebook = {\n \"cells\": [],\n \"metadata\": {},\n \"nbformat\": 4,\n \"nbformat_minor\": 4,\n }\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\n \"copyFrom\": task_id,\n \"experimentNotebook\": experiment_notebook,\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"Either provide notebooks or a task to copy from\",\n \"code\": \"MissingRequiredNotebookOrTaskId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg", "def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n 
self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. %s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)", "def test_add_write_fail(self):\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_block_extra_batch(self):\n pass", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state", "def create_task(conn):\r\n for i in range(0, len(s), 3):\r\n sql = ''' INSERT OR IGNORE INTO tasks(id,city_name,state)\r\n VALUES(?,?,?) 
'''\r\n task = (s[i],s[i+1],s[i+2])\r\n cur = conn.cursor()\r\n cur.execute(sql,task)\r\n conn.commit()\r\n return \"done\"", "async def test_exectution_limit_once(coresys: CoreSys, loop: asyncio.BaseEventLoop):\n\n class TestClass:\n \"\"\"Test class.\"\"\"\n\n def __init__(self, coresys: CoreSys):\n \"\"\"Initialize the test class.\"\"\"\n self.coresys = coresys\n self.run = asyncio.Lock()\n\n @Job(limit=JobExecutionLimit.ONCE, on_condition=JobException)\n async def execute(self, sleep: float):\n \"\"\"Execute the class method.\"\"\"\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)\n\n test = TestClass(coresys)\n run_task = loop.create_task(test.execute(0.3))\n\n await asyncio.sleep(0.1)\n with pytest.raises(JobException):\n await test.execute(0.1)\n\n await run_task", "def test_instantiating_salesforce_bulk_job_validates_object(salesforce_session, bulk_request):\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'lead')\n with pytest.raises(AssertionError):\n SalesforceBulkJob('update', 'Floob')", "def _test_retry_after_limited_retry_error(self, exception):\r\n # If we want the batch to succeed, we need to send fewer emails\r\n # than the max retries, so that the max is not triggered.\r\n num_emails = settings.BULK_EMAIL_MAX_RETRIES\r\n # We also send email to the instructor:\r\n self._create_students(num_emails - 1)\r\n expected_fails = 0\r\n expected_succeeds = num_emails\r\n with patch('bulk_email.tasks.get_connection', autospec=True) as get_conn:\r\n # Have every other mail attempt fail due to disconnection.\r\n get_conn.return_value.send_messages.side_effect = cycle([exception, None])\r\n self._test_run_with_task(\r\n send_bulk_course_email,\r\n 'emailed',\r\n num_emails,\r\n expected_succeeds,\r\n failed=expected_fails,\r\n retried_withmax=num_emails\r\n )", "def testMarkTaskAsMerging(self):\n redis_client = self._CreateRedisClient()\n\n session = sessions.Session()\n task = tasks.Task(session_identifier=session.identifier)\n\n # Trying to mark a task as merging without finalizing it raises an error.\n with self.assertRaises(IOError):\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)\n\n # Opening and closing a writer for a task should cause the task to be marked\n # as complete.\n storage_writer = writer.RedisStorageWriter(\n storage_type=definitions.STORAGE_TYPE_TASK)\n storage_writer.Open(\n redis_client=redis_client, session_identifier=task.session_identifier,\n task_identifier=task.identifier)\n storage_writer.Close()\n\n redis_store.RedisStore.MarkTaskAsMerging(\n task.identifier, session.identifier, redis_client=redis_client)", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def prepare_run(self, **kwargs):\n super().prepare_run(**kwargs)\n with open(\n os.path.join(self.rally_dir, 'rally_jobs.yaml'),\n 'r', encoding='utf-8') as task_file:\n self.task_yaml = yaml.safe_load(task_file)\n\n for task in self.task_yaml:\n if task not in self.tests:\n raise Exception(f\"Test '{task}' not in '{self.tests}'\")", "def test_spanner_indexer_insert_batch_no_conflict_does_not_trigger_individual_inserts(\n mock, testing_indexer\n):\n codec = IdCodec()\n\n model1_id = get_id()\n key_results1 = KeyResults()\n model1 = SpannerIndexerModel(\n 
id=codec.encode(model1_id),\n decoded_id=model1_id,\n string=get_random_string(10),\n organization_id=55555,\n date_added=datetime.now(),\n last_seen=datetime.now(),\n retention_days=55,\n )\n testing_indexer._insert_db_records(UseCaseKey.PERFORMANCE, [model1], key_results1)\n\n # Insert the same record with a different id but the key result would\n # have the id of model1.\n key_results2 = KeyResults()\n model2_id = get_id()\n model2 = SpannerIndexerModel(\n id=codec.encode(model2_id),\n decoded_id=model2_id,\n string=get_random_string(10),\n organization_id=55556,\n date_added=datetime.now(),\n last_seen=datetime.now(),\n retention_days=55,\n )\n testing_indexer._insert_db_records(UseCaseKey.PERFORMANCE, [model2], key_results2)\n assert mock.call_count == 0, \"Insert with collisions should not be called\"", "def testSiblingDAGConsistency(self):\n options = Job.Runner.getDefaultOptions(self._createTempDir() + '/jobStore')\n options.clean = 'always'\n options.logLevel = 'debug'\n i = Job.wrapJobFn(diamond)\n with Toil(options) as toil:\n try:\n toil.start(i)\n except FailedJobsException:\n # we expect this exception to be raised\n pass\n else:\n self.fail()" ]
[ "0.62573755", "0.620488", "0.6070838", "0.6027224", "0.6025703", "0.5959347", "0.571253", "0.5664554", "0.5623779", "0.5445145", "0.53211784", "0.5283602", "0.52729964", "0.52518106", "0.5234075", "0.5233979", "0.52330583", "0.51978356", "0.51888645", "0.518777", "0.51753086", "0.51722765", "0.5144729", "0.5144445", "0.5138327", "0.5137644", "0.513666", "0.5125125", "0.5122854", "0.51195884", "0.51159376", "0.5115627", "0.5114994", "0.5095382", "0.5090225", "0.50858593", "0.5080037", "0.5073849", "0.5067042", "0.506202", "0.5054793", "0.5053038", "0.50454813", "0.50451374", "0.5039828", "0.503855", "0.5037011", "0.5035938", "0.50257045", "0.5023945", "0.50176823", "0.5014915", "0.5004062", "0.5001319", "0.50001717", "0.49918973", "0.49914482", "0.49760863", "0.495937", "0.4943711", "0.49389148", "0.49324647", "0.4919772", "0.49134165", "0.4909688", "0.4907237", "0.49068457", "0.49053636", "0.49039933", "0.4899926", "0.48965362", "0.48895893", "0.48863164", "0.48861238", "0.48851535", "0.4883738", "0.4883707", "0.48832288", "0.48831567", "0.48828638", "0.48793283", "0.48765007", "0.48710176", "0.48684382", "0.48673937", "0.48646215", "0.48595086", "0.48542103", "0.48506427", "0.48497257", "0.4843036", "0.48389384", "0.48387033", "0.4838027", "0.48351473", "0.4831069", "0.48310456", "0.48224583", "0.4820628", "0.48201424" ]
0.6667976
0
Ensure a date with no hours / minutes is retuned as a date.
def test_date(self): from sosbeacon.utils import format_datetime date = datetime(year=2012, month=8, day=30) encoded = format_datetime(date) self.assertEqual('08/30/12', encoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insure_date(d):\n if isinstance(d, BeautifulDate):\n return date(year=d.year, month=d.month, day=d.day)\n else:\n return d", "def fake_date_without_day(value):\n return date(year=value[0], month=value[1], day=1)", "def ensure_date(value: Union[Date, DateTime, str], **kwargs: int) -> Date:\n return ensure_datetime(value, **kwargs).date()", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)", "def _as_date(self):\n if not self.is_complete():\n raise ValueError('Comparison of incomplete dates is not yet '\n 'supported.')\n return date(self.year, self.month, self.day)", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )", "def validate_date(date_given, date_format=\"%Y-%m-%d\"):\n # FIXME: use datetime.datetime.strptime(date_given, \"%Y-%m-%d\")\n # after upgrading Python => 2.5\n return datetime.datetime(*(time.strptime(date_given, date_format)[0:6]))", "def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)", "def test_date_with_zero_hours(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=0, minute=13)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 00:13', encoded)", "def test_date_change_fails_on_naive_datetime(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n with self.assertRaises(ValueError):\n event.date = datetime(2020, 10, 10, 12, 10)", "def force_to_date(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val.date()\n elif isinstance(val, datetime.date):\n return val\n elif isinstance(val, str):\n return string_to_datetime(val).date()\n else:\n raise ValueError(\"object must be date or datetime!\")", "def test_date_with_zero_minutes(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=19, minute=0)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 19:00', encoded)", "def datetime_object(__date):\n if isinstance(__date, datetime.datetime):\n return datetime.datetime(__date.year, __date.month, __date.day, __date.hour, __date.minute, __date.second)\n return None", "def test_datetime_with_naive_duedate_only_fails(self):\n # setup\n specify_wd = self.project.activity('Specify wheel diameter')\n\n # save old values\n old_start, old_due = datetime.strptime(specify_wd._json_data.get('start_date'), ISOFORMAT), \\\n datetime.strptime(specify_wd._json_data.get('due_date'), ISOFORMAT)\n naive_duedate = datetime(2017, 6, 5, 5, 0, 0)\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n 
specify_wd.edit(due_date=naive_duedate)\n\n # teardown\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=old_due)", "def check_date(date):\n import datetime\n correctDate = None\n date = str(date)\n \n if (len(date)!=8):\n return False\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n try:\n datetime.datetime(year,month,day)\n correctDate = True\n except ValueError:\n correctDate = False\n return correctDate", "def check_date(date):\n\timport datetime\n\tif date < datetime.date.today():\n\t\treturn date\n\telse:\n\t\treturn datetime.date.today()", "def test_non_std_from_json(self):\r\n now = datetime.datetime.now(UTC())\r\n delta = now - datetime.datetime.fromtimestamp(0, UTC())\r\n self.assertEqual(DateTest.date.from_json(delta.total_seconds() * 1000),\r\n now)\r\n yesterday = datetime.datetime.now(UTC()) - datetime.timedelta(days=-1)\r\n self.assertEqual(DateTest.date.from_json(yesterday), yesterday)", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def to_date_or_none(value: Optional[Union[datetime.date, str]]) -> Optional[datetime.date]:\n if isinstance(value, datetime.date):\n return value\n if value is None or value == '000000':\n return None\n return datetime.datetime.strptime(value, '%d%m%y').date()", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def test_8_digit_date_detection(self):\n obj = awstats_reader.awstats_datetime('20091130')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDate))", "def test_as_date(self):\n self.assertEqual(\n time_display.as_date(\n datetime(2020, 7, 31, 23, 59, 30, 357921)),\n '2020-07-31')", "def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)", "def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. 
Its type is <{1}>\".format(date, type(date)))", "def valid_date(date):\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def _fill_date(self):\n if not self.date['year']:\n self.date['year'] = self.DEFAULT_DATE['year']\n if not self.date['month']:\n self.date['month'] = self.DEFAULT_DATE['month']\n if not self.date['day']:\n self.date['day'] = self.DEFAULT_DATE['day']", "def test_none_handling(self):\r\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def normalize_date(__date, type='arrival'):\n\n if isinstance(__date, datetime.datetime):\n # If type is arrival pass RESERVATION_START_TIME as tup else RESERVATION_END_TIME as tup\n if type == 'arrival':\n tup = RESERVATION_START_TIME\n else:\n tup = RESERVATION_END_TIME\n\n __date = datetime.datetime(__date.year, __date.month, __date.day,\n tup[0], tup[1], tup[2])\n\n return __date\n return None", "def test_none_handling(self):\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n print(\"Error: Not a valid date: '{0}'.\".format(s))\n\n return", "def test_instantiation_fail_on_naive_datetime(self):\n with self.assertRaises(ValueError):\n Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10), # Missing timezone\n 'Some description')", "def test_one_off_training_date(self):\n self.assertIsInstance(self.one_off_training.date, datetime.date)\n self.assertEqual(\n self.one_off_training.date,\n datetime.date(2020, 6, 14)\n )", "def test_validate_date_entry_returns_correct_iso_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n date_object = datetime.datetime.strptime(\n date_string,\n date_format['datetime format'])\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (None, date_object)\n\n self.assertEqual(result, expected_result)", "def fromdate(cls, date_: date):\n if date_ is None:\n return None\n else:\n return cls(date_.year, date_.month, date_.day)", "def get_date(d):\n return Date(d[\"year\"][\"value\"], d[\"month\"][\"value\"] if d[\"month\"] else None,\n d[\"day\"][\"value\"] if d[\"day\"] else None)", "def representsValidDate(self):\n return _libsbml.Date_representsValidDate(self)", "def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object", "def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time", "def date_temporal_paradox_free(self):\n valid_date = True\n new_val = self.date_edit.text()\n datetime_object = datetime.strptime(new_val, \"%Y-%m-%d\")\n\n if datetime_object > datetime.now():\n valid_date = False\n return valid_date", "def naive(self):\n return self.datetime.replace(tzinfo=None)", "def test_hotshot_check_date_error(self):\n try:\n check_date('N/A', 'N/A', '20.11.2015')\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = 
dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def date_or_none(date_str: str | None | dt.date | dt.datetime) -> dt.date | None:\n\n if not date_str:\n return None\n\n if isinstance(date_str, dt.datetime):\n return date_str.date()\n\n if isinstance(date_str, dt.date):\n return date_str\n\n if \" \" in date_str and len(date_str) > 10:\n return dt.datetime.strptime(date_str, \"%d %B %Y\").date()\n\n p_date_str = date_str.replace(\"/\", \"-\").replace(\".\", \"-\")\n date_split = p_date_str.split(\"-\")\n\n if len(date_split) > 3 or len(date_split[-1]) > 4:\n raise ValidationError(f\"Date {date_str} not in parsable format\")\n\n if len(date_split[0]) == 4:\n date_format = \"%Y-%m-%d\"\n elif len(date_split[-1]) == 4:\n date_format = \"%d-%m-%Y\"\n else:\n date_format = \"%d-%m-%y\"\n\n return dt.datetime.strptime(p_date_str, date_format).date()", "def test_dt_obj_to_date(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # A datetime that refers to 20:00 on January 1, 2018 in New York.\n datetime_2018_01_01_2000_ny = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n # A datetime that refers to 01:00 on January 2, 2018 in UTC. Note: this is\n # the same exact moment in time as datetime_2018_01_01_2000_ny.\n datetime_2018_01_02_0100_utc = make_aware(\n datetime(year=2018, month=1, day=2, hour=1, minute=0, second=0),\n timezone=pytz.utc,\n )\n # Calling dt_obj_to_date() returns the date at each of these moments in the\n # \"America/New_York\" timezone, which was \"2018-01-01\".\n assert dt_obj_to_date(datetime_2018_01_01_2000_ny) == date(\n year=2018, month=1, day=1\n )\n assert dt_obj_to_date(datetime_2018_01_02_0100_utc) == date(\n year=2018, month=1, day=1\n )\n\n # Calling dt_obj_to_date() for non datetime objects returns None.\n dt_obj = \"A random string.\"\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = 123438\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = {\"dict\": \"random dict\"}\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = None\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None", "def test_non_input(self):\n from sosbeacon.utils import format_datetime\n\n encoded = format_datetime(None)\n self.assertEqual('', encoded)", "def test_unique_for_date_with_nullable_date(self):\n FlexibleDatePost.objects.create(\n title=\"Django 1.0 is released\",\n slug=\"Django 1.0\",\n subtitle=\"Finally\",\n posted=datetime.date(2008, 9, 3),\n )\n p = FlexibleDatePost(title=\"Django 1.0 is released\")\n p.full_clean()\n\n p = FlexibleDatePost(slug=\"Django 1.0\")\n p.full_clean()\n\n p = FlexibleDatePost(subtitle=\"Finally\")\n p.full_clean()", "def force_to_datetime(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return datetime.datetime.combine(val, datetime.time())\n elif isinstance(val, str):\n return string_to_datetime(val)\n else:\n raise ValueError(\"object must be date or datetime!\")", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def filter_to_date(date_time_val):\n if 
not isinstance(date_time_val, (datetime, date, time)):\n return date_time_val\n return date_time_val.date()", "def get_date_or_none(date_str, date_format='%Y-%m-%d'):\n try:\n return datetime.strptime(date_str, date_format).date()\n except (ValueError, TypeError):\n return None", "def from_date(cls, date: dt.date) -> Date:\n return cls(date.year, date.month, date.day)", "def Date(year, month, day):\r\n return datetime.datetime(year, month, day, 0, 0)", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def naturaldate(value):\r\n try:\r\n value = date(value.year, value.month, value.day)\r\n except AttributeError:\r\n # Passed value wasn't date-ish\r\n return value\r\n except (OverflowError, ValueError):\r\n # Date arguments out of range\r\n return value\r\n delta = abs_timedelta(value - date.today())\r\n if delta.days >= 365:\r\n return naturalday(value, '%b %d %Y')\r\n return naturalday(value)", "def make_datetime(value):\n if value:\n return value\n return None", "def convert_date_to_datetime(date):\n return datetime.combine(date, dtime()) if date else None", "def get_date(date):\n return date", "def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)", "def test_convert_to_eod_datetime(self):\n assert convert_to_eod_datetime(date(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1, 2, 30)) == datetime(\n 2020, 1, 1, 2, 30\n )\n assert convert_to_eod_datetime(\"foo\") == \"foo\"", "def validate_date(date):\n\n if isinstance(date, datetime):\n try:\n date = dto_to_str(date)\n except ValueError:\n pass # What to do in this case?\n else:\n return date\n\n if isinstance(date, str) or isinstance(date, unicode):\n try: # Convert to dto then back to string to ensure format is as expected\n date = str_to_dto(date)\n date = dto_to_str(date)\n except ValueError:\n pass\n else:\n return date\n\n raise DataValidationError(\"Date, {}, is not of an expected type (datetime object or string in format YYYYMMDD or MM/DD/YYYY\".format(date))", "def knowledge_date_valid(record):\n today = datetime.now(timezone.utc).date().strftime(\"%Y-%m-%d\")\n gen_date = record['knowledge_date'].strftime(\"%Y-%m-%d\")\n assert gen_date == today", "def _validate(year, month, day):\n if day is not None and month is None:\n raise ValueError(\"Day without month\")\n if day is None:\n day = 1\n if month is None:\n month = 1\n if year is None:\n year = 2000\n # actual validation happens here\n datetime.date(year, month, day)", "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "def is_valid_date(date):\n try:\n datetime.strptime(date, '%Y-%m-%d')\n return True\n except (ValueError, TypeError):\n return False", "def test_init_invalid_retire_date(self):\n # Whether this raises a ValueError or TypeError is an\n # implementation detail delegated to `datetime`\n with self.assertRaises((ValueError, TypeError)):\n Person(\n self.initial_year, self.name, self.birth_date,\n retirement_date='invalid')", "def make_datetime_from_dicom_date(date: str, time: str = None) -> Optional[datetime]:\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n hour=int(time[:2]),\n minute=int(time[2:4]),\n second=int(time[4:6])\n )\n except (ValueError, 
TypeError):\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n )\n except (ValueError, TypeError):\n return None", "def _check_date_not_in_future(self, date):\n if date is None:\n pass\n else:\n assert (\n date <= datetime.datetime.now()\n ), \"Provided date cannot be in the future\"", "def any2datetime_date(d):\n return datetime.date(d.year, d.month, d.day)", "def test_parse_no_timezone_no_strict():\n d = iso8601.parse_datetime(\"2007-01-01T08:00:00\", strict=False)\n assert d.year == 2007\n assert d.month == 1\n assert d.day == 1\n assert d.hour == 8\n assert d.minute == 0\n assert d.second == 0\n assert d.microsecond == 0\n assert d.tzinfo == iso8601.UTC", "def test_datetime_when_some_checks_have_no_date(self, mock_url_read):\n mock_url_read.return_value = \\\n '{\"value\": [{\"LastScan\": {\"ScanCompletedOn\": \"2016-12-14T00:01:30.737+01:00\", ' \\\n '\"Comment\": \"Attempt to perform scan on 2/13/2017 8:00:06 PM - No code changes were ' \\\n 'detected; No code changes were detected No code changes were detected\"}}]}'\n self.assertEqual(datetime.datetime(2017, 2, 13, 20, 0, 6), self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )", "def verify_date_field(field, data: str) -> str:\n if field in ['started_at', 'ended_at', 'created_at', 'performed_at', 'issued_at', 'expires_at']:\n content = string_to_date(data)\n else:\n content = data\n\n return content", "def test_parse_date(\n test_input: typing.Optional[str],\n expected: datetime.date,\n):\n assert tvmaze.parsers.parse_date(test_input) == expected", "def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)", "def date(self, year: Number, month: Number, day: Number) -> Date:\n return Date(year, month, day) # type: ignore", "def test_hotshot_check_date(self):\n date_first = check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)", "def enforce_valid_dates(arg):\n year_formats = (\n '%Y-%m-%d',\n '%Y%m%d',\n '%d',\n '%j',\n )\n\n for yf in year_formats:\n try:\n return datetime.strptime(str(arg), yf)\n except ValueError:\n pass\n\n\n raise ValueError(\n 'Unable to coerce {} to a date. 
Try %Y-%m-%d'.format(arg)\n )", "def convert_date(self, dt: datetime) -> Union[datetime, Function]:\n return dt", "def convert_date(date):\n if isinstance(date, datetime.date):\n return date\n elif isinstance(date, str):\n match = DATE_PATTERN.match(date)\n if match:\n groups = match.groups()\n if len(groups) == 3:\n return datetime.date(\n year=int(\n groups[0]), month=int(\n groups[1]), day=int(\n groups[2]))\n return None", "def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))", "def from_date(cls, d):\n raise NotImplementedError", "def __init__(self, date_datetime_or_none=_marker,\n year=None, month=None, day=None):\n if date_datetime_or_none is not _marker:\n if isinstance(date_datetime_or_none, (datetime, date)):\n self.year = date_datetime_or_none.year\n self.month = date_datetime_or_none.month\n self.day = date_datetime_or_none.day\n elif date_datetime_or_none is None:\n self.year = self.month = self.day = None\n else:\n raise TypeError(\"Can't construct a NullableDate out of %s.\" % (\n date_datetime_or_none,))\n else:\n self.year = year\n self.month = month\n self.day = day", "def convert_date_to_datetime(date_obj: date) -> datetime:\n # REF: https://stackoverflow.com/a/11619200\n assert isinstance(date_obj, date), \"Not a date object.\"\n # return the original value if the input is a datetime object\n if isinstance(date_obj, datetime):\n return date_obj\n return datetime.combine(date_obj, time())", "def check_dates(dates):\n for date in dates:\n if type(date) != datetime.datetime:\n raise TypeError('Input date, %s, not datetime object' % date)", "def create_date():\n dt = fake.date_object()\n\n try:\n human_readable = format_date(dt, format=random.choice(FORMATS), locale=random.choice(LOCALES))\n\n case_change = random.choice([0,1,2])\n if case_change == 1:\n human_readable = human_readable.upper()\n elif case_change == 2:\n human_readable = human_readable.lower()\n # if case_change == 0, do nothing\n\n machine_readable = dt.isoformat()\n except AttributeError as e:\n return None, None, None\n\n return human_readable, machine_readable, dt", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def test_14_digit_datetime(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n dt = datetime.datetime(2009, 11, 30, 16, 52, 30)\n self.assertEqual(obj, dt)", "def __nonzero__(self):\n return not (self.year is None and\n self.month is None and\n self.day is None)", "def test_sample_one_date(self):\r\n self.assertEqual(self.test_sample.date, datetime.datetime(2016, 2, 12, 7, 34, 26))", "def get_date_or_none(obj, key):\n try:\n return datetime.strptime(obj[key], '%Y-%m-%d')\n except (KeyError, ValueError):\n return None", "def is_datetime(self) -> bool:\n return False", "def date_validity(self):\n return self._date_validity", "def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n 
pd.to_datetime('2019-01-20')))", "def _date_from_str(self, date_entry, date_str):\n dt_obj = None\n if date_str:\n dt_obj = parser.parse(date_str)\n if dt_obj < MIN_DATE or dt_obj > MAX_DATE:\n prompt = 'Please keep dates within Jan 1, 2015 up to today.'\n raise ValueError(prompt)\n \n return dt_obj", "def get_standard_date(dt: Union[datetime, None]=None):\n if dt is None:\n return get_cur_standard_date()\n elif isinstance(dt, datetime):\n return get_standard_datetime(dt)", "def to_date(value: Union[datetime.date, str]) -> datetime.date:\n if isinstance(value, datetime.date):\n return value\n return datetime.datetime.strptime(value, '%d%m%y').date()", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())", "def test_startdate(self):\n req = create_request(query_string={'dates': '7d'})\n eq_(startdate(req), date.today() - timedelta(days=7))\n\n req = create_request(query_string={'dates': 'today'})\n eq_(startdate(req), date.today())\n\n req = create_request(query_string={'day': '2012-05-24'})\n eq_(startdate(req), datetime(2012, 5, 24))\n\n req = create_request(query_string={'week': '2012-05-24'})\n eq_(startdate(req), datetime(2012, 5, 21))\n\n req = create_request(query_string={'day': 'today'})\n eq_(startdate(req), None)\n\n req = create_request()\n eq_(startdate(req), None)", "def skip_or_run_datetime_test(func):\n\n return skip_or_run_test_pcall_require(func, 'datetime',\n 'does not support datetime type')" ]
[ "0.70018554", "0.6946764", "0.685827", "0.6737637", "0.6687125", "0.66750145", "0.65934587", "0.64681107", "0.64442796", "0.6383153", "0.6327889", "0.6267114", "0.62509316", "0.6243825", "0.61947256", "0.61693084", "0.6164787", "0.61503243", "0.6149337", "0.6140435", "0.6130044", "0.6091354", "0.60866994", "0.60289365", "0.60000366", "0.5995617", "0.5982296", "0.59665155", "0.59436685", "0.592822", "0.5928084", "0.59218353", "0.59188", "0.59040886", "0.59036964", "0.5893911", "0.5885699", "0.58716726", "0.58714145", "0.5864744", "0.5847463", "0.584709", "0.5845475", "0.5835695", "0.5833691", "0.5825446", "0.5822748", "0.58022964", "0.57890326", "0.57628345", "0.57574654", "0.57559735", "0.5753858", "0.5750545", "0.5748174", "0.5739618", "0.57281435", "0.57209617", "0.5710314", "0.5698845", "0.5695159", "0.5693909", "0.5693238", "0.5689988", "0.56853026", "0.56827176", "0.5672582", "0.5663891", "0.5663338", "0.56622076", "0.56599385", "0.56542575", "0.56529087", "0.5640696", "0.5590274", "0.5589275", "0.5581397", "0.5575251", "0.5571779", "0.5569282", "0.55653805", "0.5540192", "0.55361515", "0.55317426", "0.5527787", "0.552741", "0.55266", "0.5525895", "0.5523532", "0.55119157", "0.55116045", "0.551136", "0.55092394", "0.5503254", "0.55005383", "0.5499707", "0.5496651", "0.54950213", "0.5493594", "0.5491746", "0.5489275" ]
0.0
-1
Ensure a date with hours and minutes is returned as a datetime.
def test_date_with_time(self):
    from sosbeacon.utils import format_datetime

    date = datetime(year=2012, month=8, day=30, hour=7, minute=13)
    encoded = format_datetime(date)

    self.assertEqual('08/30/12 07:13', encoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)", "def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. Its type is <{1}>\".format(date, type(date)))", "def validate_datetime(self, current_date):\n valid_minute = None\n valid_hour = None\n MIN_HOUR = 0\n MAX_HOUR = 23\n MIN_MINUTE = 0\n MAX_MINUTE = 59\n TIME_SEPARATOR = u':'\n\n hour, minute = current_date.split(TIME_SEPARATOR)\n\n try:\n if ((MIN_HOUR <= int(hour) <= MAX_HOUR) and\n (MIN_MINUTE <= int(minute) <= MAX_MINUTE)):\n valid_minute = int(minute)\n valid_hour = int(hour)\n except ValueError as e:\n logging.error(u'Given current time is invalid %s', e)\n\n valid_datetime = {u'hour': valid_hour, u'minute': valid_minute}\n\n return valid_datetime", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )", "def make_datetime_obj(date, time):\n\n conv_date = datetime.strptime(date, \"%Y-%m-%d\").date()\n conv_time = datetime.strptime(time, \"%H:%M\").time()\n\n return datetime.combine(conv_date, conv_time)", "def make_datetime():\n date = input(\"Please give a date as month/day/year, (month ex jan feb): \")\n time = input(\"Please give a time in hour:minute (AM/PM): \")\n the_datetime = date + time\n try:\n our_datetime = datetime.datetime.strptime(the_datetime, \"%b/%d/%Y%I:%M %p\")\n return our_datetime\n except ValueError:\n return make_datetime()", "def datetime_object(__date):\n if isinstance(__date, datetime.datetime):\n return datetime.datetime(__date.year, __date.month, __date.day, __date.hour, __date.minute, __date.second)\n return None", "def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return", "def _datetime(year, month, day, hour, minute, second):\n try:\n return datetime.datetime(\n year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc\n )\n except ValueError:\n invalid_datetime = (\n f\"{year:04d}-{month:02d}-{day:02d} \"\n f\"{hour:02d}:{minute:02d}:{second:02d}\"\n )\n raise ftputil.error.ParserError(\n \"invalid datetime {0!r}\".format(invalid_datetime)\n )", "def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return", "def test_14_digit_datetime(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n dt = datetime.datetime(2009, 11, 30, 16, 52, 30)\n self.assertEqual(obj, dt)", "def date_to_datetime(date, time=None):\n if time is None:\n time = dt.datetime.min.time()\n return dt.datetime.combine(date, time)", "def test_date_with_zero_hours(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=0, minute=13)\n encoded = format_datetime(date)\n 
self.assertEqual('08/30/12 00:13', encoded)", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))", "def test_date_with_zero_minutes(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=19, minute=0)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 19:00', encoded)", "def convert_date_to_datetime(date_obj: date) -> datetime:\n # REF: https://stackoverflow.com/a/11619200\n assert isinstance(date_obj, date), \"Not a date object.\"\n # return the original value if the input is a datetime object\n if isinstance(date_obj, datetime):\n return date_obj\n return datetime.combine(date_obj, time())", "def get_datetime(time):\n year = int(time[0:4])\n month = int(time[5:7])\n day = int(time[8:10])\n hour = int(time[11:13])\n minute = int(time[14:16])\n second = int(time[17:19])\n return datetime(year, month, day, hour, minute, second)", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"", "def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time", "def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def test_as_datetime(self):\n self.assertEqual(\n time_display.as_datetime(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def get_datetime_from_time(value: datetime | time) -> datetime:\n if isinstance(value, time):\n value = datetime.combine(dt_util.now().date(), value, dt_util.DEFAULT_TIME_ZONE)\n if isinstance(value, datetime):\n value = value.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE)\n if value > dt_util.now():\n raise ValidationError(\"Time cannot be in the future.\")\n return value", "def from_isodatetime(date_time: str) -> Optional[Union[datetime.datetime,\n datetime.timedelta,\n datetime.time]]:\n if not date_time:\n return None\n utc = datetime.timezone(datetime.timedelta(0))\n if date_time[:2] == 'PT':\n match = duration_re.match(date_time)\n if not match:\n raise ValueError(date_time)\n hours, minutes, seconds = match.group(\n 'hours'), match.group('minutes'), match.group('seconds')\n secs: float = 0\n if hours is not None:\n secs += int(match.group('hours')) * 3600\n if minutes is not None:\n secs += int(match.group('minutes')) * 60\n if seconds is not None:\n secs += float(match.group('seconds'))\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(\n date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\").replace(tzinfo=utc)\n except ValueError:\n return datetime.datetime.strptime(date_time, 
\"%Y-%m-%dT%H:%MZ\").replace(tzinfo=utc)\n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").replace(tzinfo=utc).time()", "def make_datetime_from_dicom_date(date: str, time: str = None) -> Optional[datetime]:\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n hour=int(time[:2]),\n minute=int(time[2:4]),\n second=int(time[4:6])\n )\n except (ValueError, TypeError):\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n )\n except (ValueError, TypeError):\n return None", "def test_valid_turnaround_hour(self):\n valid_datetime = datetime.datetime(2020, 8, 21, 13, 2)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, 0)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, -1)\n calculate_due_date(valid_datetime, 1)", "def time_to_datetime(time):\n split_time = time.split(':')\n hour = int(split_time[0])\n minutes = int(split_time[1])\n now = dt.datetime.now()\n time_as_datetime = dt.datetime(now.year, now.month, now.day,\n hour=hour, minute=minutes)\n\n # Need to change the day to tommorow if time has already passed\n if time_as_datetime < now:\n day = now.day + 1\n time_as_datetime = dt.datetime(now.year, now.month, day,\n hour=hour, minute=minutes)\n\n return time_as_datetime", "def correct_datetime(record_datetime):\n assert record_datetime.date() == datetime.now(timezone.utc).date()", "def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)", "def datetime(self):\n return datetime.datetime(self.year, self.month, self.day,\n self.hour, self.min, self.sec)", "def test_as_date(self):\n self.assertEqual(\n time_display.as_date(\n datetime(2020, 7, 31, 23, 59, 30, 357921)),\n '2020-07-31')", "def test_datetime_creation(self):\n self.assertIsInstance(self.user_1.created_at, datetime)\n self.assertIsInstance(self.user_1.updated_at, datetime)", "def ensure_datetime(value: Union[Date, DateTime, str], **kwargs: int) -> DateTime:\n ## Check the type of the value and act accordingly.\n if isinstance(value, DateTime):\n ## It is a datetime instance. Nothing to be done. Just return with replacement:\n return value.replace(**kwargs) # type: ignore\n elif isinstance(value, Date):\n ## It is a date instance. Set to morning and return with replacement:\n return DateTime.combine(value, DateTime.min.time()).replace(**kwargs) # type: ignore\n elif isinstance(value, str):\n ## We have a string. Attempt to parse and return with replacement:\n try:\n return parse(value).replace(**kwargs) # type: ignore\n except ParserError:\n raise ValueError(\"Can not parse value into a date/time object: {}\".format(value))\n\n ## We have a problem here: Don't know how to convert other\n ## object. 
Raise a value error:\n raise ValueError(\"Don't know how to convert value to date/time object: {}\".format(value))", "def test_paid_at(self):\n\n self.assertIsInstance(self.obj.paid_at, datetime)", "def force_to_datetime(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return datetime.datetime.combine(val, datetime.time())\n elif isinstance(val, str):\n return string_to_datetime(val)\n else:\n raise ValueError(\"object must be date or datetime!\")", "def gen_date_with_mins(date):\n datetime_info = date.split(', ')\n time = convert_12_to_24(datetime_info[0])\n month, day = datetime_info[1].split(' ')\n year = datetime_info[2]\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n time = datetime.time(time.hour, time.minute)\n return date, time", "def test_convert_datetime():", "def validate_date(date_given, date_format=\"%Y-%m-%d\"):\n # FIXME: use datetime.datetime.strptime(date_given, \"%Y-%m-%d\")\n # after upgrading Python => 2.5\n return datetime.datetime(*(time.strptime(date_given, date_format)[0:6]))", "def convert_datetime(date, time):\n return datetime.datetime.strptime(date + \" \" + time, '%Y-%m-%d %H:%M:%S')", "def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)", "def test_datetime_from(self):\n dt = sync.datetime_from('2012-09-09T00:00:00') # EDT\n self.assertEqual(2012, dt.year)\n self.assertEqual(9, dt.month)\n self.assertEqual(10, dt.day)\n self.assertEqual(3, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')\n\n dt = sync.datetime_from('2012-12-09T00:00:00') # EST\n self.assertEqual(2012, dt.year)\n self.assertEqual(12, dt.month)\n self.assertEqual(10, dt.day)\n self.assertEqual(4, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')", "def mocked_time():\n return datetime.datetime(2017, 10, 27, 22, 54, 56, 566179)", "def datetime(self):\n return datetime(*tuple(self))", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "def convert_date_to_datetime(date):\n return datetime.combine(date, dtime()) if date else None", "def test_date_change_fails_on_naive_datetime(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n with self.assertRaises(ValueError):\n event.date = datetime(2020, 10, 10, 12, 10)", "def getDatetime(self, date):\n dt = datetime.datetime.strptime(date, \"%Y-%m-%d@%H:%M\")\n return dt", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def datetime_from_json(data):\n\n a = data['meta']['dateobs']\n year = int(a[:4])\n month = int(a[4:6])\n day = int(a[6:8])\n hour = int(a[9:11])\n time1 = datetime.datetime(year, month, day, hour)\n return time1", "def is_datetime(self) -> bool:\n return False", "def parse_time(time_input, *, force_datetime=False, allow_undefined=False, **kwargs):\n\n if allow_undefined and time_input 
in [None, '..']:\n return None\n\n if isinstance(time_input, dt.date):\n if force_datetime and not isinstance(time_input, dt.datetime):\n return date_to_datetime(time_input)\n\n if kwargs.get('ignoretz') and isinstance(time_input, dt.datetime):\n return time_input.replace(tzinfo=None)\n\n return time_input\n\n time = dateutil.parser.parse(time_input, **kwargs)\n if force_datetime or len(time_input) > 10: # This check is not very accurate but it works for iso format\n return time\n return time.date()", "def test_created_at(self):\n self.assertIsInstance(self.obj_ticket.created_at, datetime)", "def make_datetime(value):\n if value:\n return value\n return None", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def test_create_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())", "def test_convert_to_eod_datetime(self):\n assert convert_to_eod_datetime(date(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1, 2, 30)) == datetime(\n 2020, 1, 1, 2, 30\n )\n assert convert_to_eod_datetime(\"foo\") == \"foo\"", "def test_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__jenkins.datetime(*('job',)))", "def from_isodatetime(date_time):\n if not date_time:\n return None\n if date_time[:2]=='PT':\n if 'M' in date_time:\n dt = datetime.datetime.strptime(date_time, \"PT%HH%MM%SS\")\n else:\n dt = datetime.datetime.strptime(date_time, \"PT%H:%M:%S\")\n secs = (dt.hour*60+dt.minute)*60 + dt.second\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%MZ\") \n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").time()", "def test_datetime_field():", "def __handleDateAttribute(self, timeString):\n try:\n if len(str(timeString)) == 13:\n return datetime.datetime.fromtimestamp(timeString / 1000)\n else:\n return 
datetime.datetime.fromtimestamp(timeString)\n except ValueError:\n return None\n except TypeError:\n return None", "def test_instantiation_fail_on_naive_datetime(self):\n with self.assertRaises(ValueError):\n Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10), # Missing timezone\n 'Some description')", "def _get_datetime(date):\n return datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.000%z')", "def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result", "def generate_datetime(self, hour):\n minute = randint(0, self.MINUTES_IN_HOUR - 1)\n second = randint(0, self.SECONDS_IN_MINUTE - 1)\n\n return datetime.strptime(str(self.date.year) + \"-\"\n + str(self.date.month) + \"-\"\n + str(self.date.day) + \" \"\n + str(hour) + \":\"\n + str(minute) + \":\"\n + str(second), '%Y-%m-%d %H:%M:%S')", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def datetime(when = None):\n if when is None:\n return xmlrpclib.DateTime(time.gmtime())\n else:\n return xmlrpclib.DateTime(time.gmtime(when))", "def test_parse_datetime(\n test_input: str,\n expected: datetime.datetime,\n):\n assert tvmaze.parsers.parse_datetime(test_input) == expected", "def test_8_digit_date_detection(self):\n obj = awstats_reader.awstats_datetime('20091130')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDate))", "def ensure_date(value: Union[Date, DateTime, str], **kwargs: int) -> Date:\n return ensure_datetime(value, **kwargs).date()", "def datetime_checkinput(year, month, day):\n try:\n datetime.datetime(year, month, day)\n except:\n raise Invaliddatetimeinput\n return 0", "def test_make_datetime_aware(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # Calling make_datetime_aware() returns a timezone-aware datetime referring\n # to the moment from the naive_datetime_obj, in the appropriate time zone.\n naive_datetime_str = \"2018-01-01T20:00:00\"\n expected_datetime_obj = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n assert make_datetime_aware(naive_datetime_str) == expected_datetime_obj\n\n # Calling make_datetime_aware() for non-datetime strings returns None.\n dt_str = \"\"\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None\n dt_str = None\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None", "def test_gen_iso_datetime_str(self):\n\n est = pytz.timezone(\"EST\")\n some_date = datetime.datetime(\n year=1985, month=11, day=15,\n hour=6, minute=0,\n tzinfo=est)\n\n # Generate an ISO datetime string, and parse it. 
This will convert it\n # from EST to UTC.\n parsed_dtime = parse_datetime(gen_iso_datetime_str(some_date))\n # EST is -5, so the hour should now be 11.\n self.assertEqual(parsed_dtime.hour, 11)\n # tzinfo will be UTC, since we converted it upon parsing.\n self.assertIs(parsed_dtime.tzinfo, UTC_TZINFO)", "def test_created_at(self):\n self.assertIsInstance(self.certificate_history.datetime, datetime.datetime)", "def parse_date_time(date, time):\n dt = '%s %s' % (date, time)\n return datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')", "def construct_date(date_and_time=None):\n if not date_and_time:\n date_and_time = datetime.datetime.today()\n date_and_time = date_and_time.strftime('%Y%m%d-%H%M')\n return date_and_time", "def test_get_date(self):\n d = modis.get_date(os.path.splitext(self.fname)[0])\n self.assertTrue(isinstance(d, datetime.datetime))\n self.assertEqual(d, datetime.datetime(2015, 9, 23))", "def _CreateDateTime(self, date_string, time_string):\n if not date_string and not time_string:\n raise errors.ParseError('Missing date or time string.')\n\n # TODO: Figure out how McAfee sets Day First and use that here.\n # The in-file time format is '07/30/2013\\t10:22:48 AM'.\n\n try:\n month_string, day_of_month_string, year_string = date_string.split('/')\n year = int(year_string, 10)\n month = int(month_string, 10)\n day_of_month = int(day_of_month_string, 10)\n except (AttributeError, ValueError):\n raise errors.ParseError('Unsupported date string: {0:s}'.format(\n date_string))\n\n try:\n time_value, time_suffix = time_string.split(' ')\n hours_string, minutes_string, seconds_string = time_value.split(':')\n hours = int(hours_string, 10)\n minutes = int(minutes_string, 10)\n seconds = int(seconds_string, 10)\n except (AttributeError, ValueError):\n raise errors.ParseError('Unsupported time string: {0:s}'.format(\n time_string))\n\n if time_suffix == 'PM':\n hours += 12\n elif time_suffix != 'AM':\n raise errors.ParseError('Unsupported time suffix: {0:s}.'.format(\n time_suffix))\n\n time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n\n try:\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=time_elements_tuple)\n\n except ValueError:\n raise errors.ParseError(\n 'Unsupported date and time strings: {0:s} {1:s}'.format(\n date_string, time_string))\n\n date_time.is_local_time = True\n return date_time", "def datetime(\n self, datetimetuple: tuple[int, int, int, int, int, int, int, int], /\n ) -> None:", "def dt(*args, **kwargs):\n \n if 'tz' in kwargs:\n tzinfo = kwargs.pop('tz')\n else:\n tzinfo = kwargs.pop('tzinfo', None)\n \n offset_s = kwargs.pop('offset_s', None) \n trustme = kwargs.pop('trustme', False)\n \n if kwargs:\n raise Exception('Unhandled arg: \"{}\".'.format(kwargs))\n \n if (tzinfo is None):\n # Force UTC if None\n timezone = timezonize('UTC')\n \n else:\n timezone = timezonize(tzinfo)\n \n if offset_s:\n # Special case for the offset\n from dateutil.tz import tzoffset\n if not tzoffset:\n raise Exception('For ISO date with offset please install dateutil')\n time_dt = datetime.datetime(*args, tzinfo=tzoffset(None, offset_s))\n else:\n # Standard timezone\n time_dt = timezone.localize(datetime.datetime(*args))\n\n # Check consistency \n if not trustme and timezone != pytz.UTC:\n if not check_dt_consistency(time_dt):\n raise ValueError('Sorry, time {} does not exists on timezone {}'.format(time_dt, timezone))\n\n return time_dt", "def test_change_of_date_field_type(self):\n # Create a user, device and a heartbeat\n user = 
Dummy.create_user()\n device = Dummy.create_device(user)\n heartbeat_timestamp = datetime(2015, 12, 15, 1, 23, 45, tzinfo=pytz.utc)\n\n heartbeat = Dummy.create_report(\n HeartBeat, device, date=heartbeat_timestamp\n )\n\n # Assert that the date is of type datetime\n self.assertIsInstance(heartbeat.date, datetime)\n\n # Run the migration\n self.migrate_to_dest()\n\n # Assert that the date is now of type date and has the correct value\n heartbeat = HeartBeat.objects.first()\n self.assertIsInstance(heartbeat.date, date)\n self.assertEqual(heartbeat.date, heartbeat_timestamp.date())", "def get_datetime(self, date_str, end=False):\n if date_str == \"now\" or date_str is None:\n return datetime.utcnow()\n try:\n return parser.parse(date_str)\n except:\n print(\"failed to parse date string:\", date_str)\n exit()", "def test_year_year_zero_datetime_parse(self):\n obj = awstats_reader.awstats_datetime('0')\n self.assertEqual(obj,datetime.datetime(1,1,1))", "def test_dt_obj_to_date(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # A datetime that refers to 20:00 on January 1, 2018 in New York.\n datetime_2018_01_01_2000_ny = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n # A datetime that refers to 01:00 on January 2, 2018 in UTC. Note: this is\n # the same exact moment in time as datetime_2018_01_01_2000_ny.\n datetime_2018_01_02_0100_utc = make_aware(\n datetime(year=2018, month=1, day=2, hour=1, minute=0, second=0),\n timezone=pytz.utc,\n )\n # Calling dt_obj_to_date() returns the date at each of these moments in the\n # \"America/New_York\" timezone, which was \"2018-01-01\".\n assert dt_obj_to_date(datetime_2018_01_01_2000_ny) == date(\n year=2018, month=1, day=1\n )\n assert dt_obj_to_date(datetime_2018_01_02_0100_utc) == date(\n year=2018, month=1, day=1\n )\n\n # Calling dt_obj_to_date() for non datetime objects returns None.\n dt_obj = \"A random string.\"\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = 123438\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = {\"dict\": \"random dict\"}\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None\n dt_obj = None\n date_obj = dt_obj_to_date(dt_obj)\n assert date_obj == None", "def parse_datetime(val):\n try: return maya.parse(val).datetime()\n except: return val", "def test_parse_valid_time_of_day(self):\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_of_day({\n 'Hour': 23,\n 'Minute': 59\n })\n\n self.assertIsInstance(res, TimeOfDay)\n\n self.assertEqual(res.hour, 23)\n self.assertEqual(res.minute, 59)\n\n res2 = sf_c.parse_time_of_day({\n 'Hour': 0,\n 'Minute': 0\n })\n\n self.assertIsInstance(res2, TimeOfDay)\n\n self.assertEqual(res2.hour, 0)\n self.assertEqual(res2.minute, 0)", "def _get_datetime(dt_value):\n result = None\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f %z\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S\")\n if result is None:\n raise RuntimeError(\n \"Failed to convert '{}' into datetime object\".format(dt_value))\n return result", "def test_sample_one_date(self):\r\n self.assertEqual(self.test_sample.date, datetime.datetime(2016, 2, 12, 7, 34, 26))", "def test_hotshot_check_date(self):\n date_first = 
check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)", "def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object", "def to_datetime(self,date):\n\n dt = datetime.datetime(date.year,date.month,date.day)\n return timezone.make_aware(dt, timezone.get_default_timezone())", "def test_event_start_datetimes(self):\n e = Event(title=self.TITLE, creator=self.USER,\n start_date=self.START.date(),\n start_time=self.START.time())\n self.assertEqual(e.start_datetime(), self.START)", "def normalize_date(__date, type='arrival'):\n\n if isinstance(__date, datetime.datetime):\n # If type is arrival pass RESERVATION_START_TIME as tup else RESERVATION_END_TIME as tup\n if type == 'arrival':\n tup = RESERVATION_START_TIME\n else:\n tup = RESERVATION_END_TIME\n\n __date = datetime.datetime(__date.year, __date.month, __date.day,\n tup[0], tup[1], tup[2])\n\n return __date\n return None", "def _test_df_datetime(self, df):\n date_raw = df['DateListed'].iloc[0] # e.g. '2016-01-07 00:00:00'\n first_date_time = datetime.strptime(date_raw, \"%Y-%m-%d %H:%M:%S\")\n assert first_date_time >= datetime(2016, 1, 1, 0, 0) and \\\n first_date_time < datetime(2017, 1, 1, 0, 0)" ]
[ "0.7026758", "0.65030086", "0.64972097", "0.64888424", "0.6435325", "0.64127433", "0.6333002", "0.62801087", "0.6271071", "0.62366825", "0.62104833", "0.6207101", "0.61587965", "0.6086302", "0.6086026", "0.60570335", "0.6036885", "0.60081786", "0.5994509", "0.5992334", "0.5986238", "0.59723413", "0.5968278", "0.5952078", "0.59489226", "0.5946141", "0.5931302", "0.5930926", "0.5925765", "0.58943254", "0.5880345", "0.5870759", "0.58672464", "0.5855865", "0.58509964", "0.5850896", "0.5830388", "0.5814125", "0.58081305", "0.5801773", "0.5800448", "0.57937056", "0.5792072", "0.5791013", "0.5787746", "0.5785216", "0.57847255", "0.5782132", "0.5777153", "0.5766421", "0.57642514", "0.57642514", "0.57642514", "0.57566285", "0.5756309", "0.5750545", "0.57457644", "0.573755", "0.5721146", "0.572106", "0.57197833", "0.57112885", "0.57093954", "0.56913626", "0.56885654", "0.5673596", "0.56533337", "0.5652288", "0.56482285", "0.56481814", "0.5632578", "0.5625023", "0.56113875", "0.5597236", "0.5596863", "0.5596183", "0.5592935", "0.55654895", "0.556385", "0.55118185", "0.55046076", "0.5499297", "0.5494221", "0.5492973", "0.54894084", "0.5486482", "0.54763055", "0.54709417", "0.54557455", "0.54448766", "0.5444262", "0.54426754", "0.5436019", "0.5432334", "0.54314774", "0.5420928", "0.5419185", "0.5415574", "0.54102206", "0.54083645" ]
0.64528507
4
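The tests in these rows exercise sosbeacon.utils.format_datetime, whose implementation is not part of this dataset. The sketch below is only a minimal illustration inferred from the expected values in the tests above (a '%m/%d/%y %H:%M' string for a datetime, and an empty string for None as asserted in test_non_input among the negatives); the real sosbeacon helper may differ, so treat the body as an assumption.

def format_datetime(value):
    # Hypothetical sketch: render a datetime as 'MM/DD/YY HH:MM', or '' when no value is given.
    if value is None:
        return ''
    return value.strftime('%m/%d/%y %H:%M')

For example, format_datetime(datetime(2012, 8, 30, 7, 13)) would yield '08/30/12 07:13', matching the assertion in the row above.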
Ensure a date with minutes but no hours is returned as a datetime.
def test_date_with_zero_hours(self):
    from sosbeacon.utils import format_datetime

    date = datetime(year=2012, month=8, day=30, hour=0, minute=13)
    encoded = format_datetime(date)

    self.assertEqual('08/30/12 00:13', encoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_date_with_zero_minutes(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=19, minute=0)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 19:00', encoded)", "def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )", "def datetime_object(__date):\n if isinstance(__date, datetime.datetime):\n return datetime.datetime(__date.year, __date.month, __date.day, __date.hour, __date.minute, __date.second)\n return None", "def validate_datetime(self, current_date):\n valid_minute = None\n valid_hour = None\n MIN_HOUR = 0\n MAX_HOUR = 23\n MIN_MINUTE = 0\n MAX_MINUTE = 59\n TIME_SEPARATOR = u':'\n\n hour, minute = current_date.split(TIME_SEPARATOR)\n\n try:\n if ((MIN_HOUR <= int(hour) <= MAX_HOUR) and\n (MIN_MINUTE <= int(minute) <= MAX_MINUTE)):\n valid_minute = int(minute)\n valid_hour = int(hour)\n except ValueError as e:\n logging.error(u'Given current time is invalid %s', e)\n\n valid_datetime = {u'hour': valid_hour, u'minute': valid_minute}\n\n return valid_datetime", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. 
Its type is <{1}>\".format(date, type(date)))", "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "def test_date_change_fails_on_naive_datetime(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n with self.assertRaises(ValueError):\n event.date = datetime(2020, 10, 10, 12, 10)", "def test_instantiation_fail_on_naive_datetime(self):\n with self.assertRaises(ValueError):\n Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10), # Missing timezone\n 'Some description')", "def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return", "def gen_date_with_mins(date):\n datetime_info = date.split(', ')\n time = convert_12_to_24(datetime_info[0])\n month, day = datetime_info[1].split(' ')\n year = datetime_info[2]\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n time = datetime.time(time.hour, time.minute)\n return date, time", "def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return", "def date_to_datetime(date, time=None):\n if time is None:\n time = dt.datetime.min.time()\n return dt.datetime.combine(date, time)", "def test_parse_no_timezone_no_strict():\n d = iso8601.parse_datetime(\"2007-01-01T08:00:00\", strict=False)\n assert d.year == 2007\n assert d.month == 1\n assert d.day == 1\n assert d.hour == 8\n assert d.minute == 0\n assert d.second == 0\n assert d.microsecond == 0\n assert d.tzinfo == iso8601.UTC", "def naive(self):\n return self.datetime.replace(tzinfo=None)", "def make_datetime():\n date = input(\"Please give a date as month/day/year, (month ex jan feb): \")\n time = input(\"Please give a time in hour:minute (AM/PM): \")\n the_datetime = date + time\n try:\n our_datetime = datetime.datetime.strptime(the_datetime, \"%b/%d/%Y%I:%M %p\")\n return our_datetime\n except ValueError:\n return make_datetime()", "def ensure_datetime(value: Union[Date, DateTime, str], **kwargs: int) -> DateTime:\n ## Check the type of the value and act accordingly.\n if isinstance(value, DateTime):\n ## It is a datetime instance. Nothing to be done. Just return with replacement:\n return value.replace(**kwargs) # type: ignore\n elif isinstance(value, Date):\n ## It is a date instance. Set to morning and return with replacement:\n return DateTime.combine(value, DateTime.min.time()).replace(**kwargs) # type: ignore\n elif isinstance(value, str):\n ## We have a string. Attempt to parse and return with replacement:\n try:\n return parse(value).replace(**kwargs) # type: ignore\n except ParserError:\n raise ValueError(\"Can not parse value into a date/time object: {}\".format(value))\n\n ## We have a problem here: Don't know how to convert other\n ## object. 
Raise a value error:\n raise ValueError(\"Don't know how to convert value to date/time object: {}\".format(value))", "def test_valid_turnaround_hour(self):\n valid_datetime = datetime.datetime(2020, 8, 21, 13, 2)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, 0)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, -1)\n calculate_due_date(valid_datetime, 1)", "def parse_time(time_input, *, force_datetime=False, allow_undefined=False, **kwargs):\n\n if allow_undefined and time_input in [None, '..']:\n return None\n\n if isinstance(time_input, dt.date):\n if force_datetime and not isinstance(time_input, dt.datetime):\n return date_to_datetime(time_input)\n\n if kwargs.get('ignoretz') and isinstance(time_input, dt.datetime):\n return time_input.replace(tzinfo=None)\n\n return time_input\n\n time = dateutil.parser.parse(time_input, **kwargs)\n if force_datetime or len(time_input) > 10: # This check is not very accurate but it works for iso format\n return time\n return time.date()", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def make_datetime_from_dicom_date(date: str, time: str = None) -> Optional[datetime]:\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n hour=int(time[:2]),\n minute=int(time[2:4]),\n second=int(time[4:6])\n )\n except (ValueError, TypeError):\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n )\n except (ValueError, TypeError):\n return None", "def make_datetime(value):\n if value:\n return value\n return None", "def test_date_with_time(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=7, minute=13)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 07:13', encoded)", "def test_datetime_with_naive_duedate_only_fails(self):\n # setup\n specify_wd = self.project.activity('Specify wheel diameter')\n\n # save old values\n old_start, old_due = datetime.strptime(specify_wd._json_data.get('start_date'), ISOFORMAT), \\\n datetime.strptime(specify_wd._json_data.get('due_date'), ISOFORMAT)\n naive_duedate = datetime(2017, 6, 5, 5, 0, 0)\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=naive_duedate)\n\n # teardown\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=old_due)", "def test_event_start_datetimes_none_with_incomplete_data(self):\n e = Event(title=self.TITLE, creator=self.USER,\n start_date=self.START.date())\n self.assertIsNone(e.start_datetime())\n\n f = Event(title=self.TITLE, creator=self.USER,\n start_time=self.START.time())\n self.assertIsNone(f.start_datetime())", "def test_14_digit_datetime(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n dt = datetime.datetime(2009, 11, 30, 16, 52, 30)\n self.assertEqual(obj, dt)", "def get_datetime_from_time(value: datetime | time) -> datetime:\n if isinstance(value, time):\n value = datetime.combine(dt_util.now().date(), value, dt_util.DEFAULT_TIME_ZONE)\n if isinstance(value, datetime):\n value = value.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE)\n if value > 
dt_util.now():\n raise ValidationError(\"Time cannot be in the future.\")\n return value", "def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))", "def _datetime(year, month, day, hour, minute, second):\n try:\n return datetime.datetime(\n year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc\n )\n except ValueError:\n invalid_datetime = (\n f\"{year:04d}-{month:02d}-{day:02d} \"\n f\"{hour:02d}:{minute:02d}:{second:02d}\"\n )\n raise ftputil.error.ParserError(\n \"invalid datetime {0!r}\".format(invalid_datetime)\n )", "def force_to_datetime(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return datetime.datetime.combine(val, datetime.time())\n elif isinstance(val, str):\n return string_to_datetime(val)\n else:\n raise ValueError(\"object must be date or datetime!\")", "def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def test_none_handling(self):\r\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def datetime(self):\n return datetime.datetime(self.year, self.month, self.day,\n self.hour, self.min, self.sec)", "def is_datetime(self) -> bool:\n return False", "def test_none_handling(self):\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def test_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__jenkins.datetime(*('job',)))", "def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)", "def from_isodatetime(date_time: str) -> Optional[Union[datetime.datetime,\n datetime.timedelta,\n datetime.time]]:\n if not date_time:\n return None\n utc = datetime.timezone(datetime.timedelta(0))\n if date_time[:2] == 'PT':\n match = duration_re.match(date_time)\n if not match:\n raise ValueError(date_time)\n hours, minutes, seconds = match.group(\n 'hours'), match.group('minutes'), match.group('seconds')\n secs: float = 0\n if hours is not None:\n secs += int(match.group('hours')) * 3600\n if minutes is not None:\n secs += int(match.group('minutes')) * 60\n if seconds is not None:\n secs += float(match.group('seconds'))\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(\n date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\").replace(tzinfo=utc)\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%MZ\").replace(tzinfo=utc)\n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").replace(tzinfo=utc).time()", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def convert_date_to_datetime(date):\n return datetime.combine(date, dtime()) if date else None", 
"def fake_date_without_day(value):\n return date(year=value[0], month=value[1], day=1)", "def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time", "def correct_datetime(record_datetime):\n assert record_datetime.date() == datetime.now(timezone.utc).date()", "def make_datetime_obj(date, time):\n\n conv_date = datetime.strptime(date, \"%Y-%m-%d\").date()\n conv_time = datetime.strptime(time, \"%H:%M\").time()\n\n return datetime.combine(conv_date, conv_time)", "def skip_or_run_datetime_test(func):\n\n return skip_or_run_test_pcall_require(func, 'datetime',\n 'does not support datetime type')", "def create_naive_datetime():\n # datetime(year, month, day, hour, minute, second, microsecond, tzinfo)\n dt = datetime.datetime(2017, 1, 1, 0, 0, 0)\n print(\"year: \" + str(dt.year))\n print(\"second: \" + str(dt.second))", "def mocked_time():\n return datetime.datetime(2017, 10, 27, 22, 54, 56, 566179)", "def test_search_date_false(self):\n test = self.ec.search_date()\n self.assertEqual(self.ecm.test_minute(), test)", "def test_as_datetime(self):\n self.assertEqual(\n time_display.as_datetime(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def test_year_year_zero_datetime_parse(self):\n obj = awstats_reader.awstats_datetime('0')\n self.assertEqual(obj,datetime.datetime(1,1,1))", "def __handleDateAttribute(self, timeString):\n try:\n if len(str(timeString)) == 13:\n return datetime.datetime.fromtimestamp(timeString / 1000)\n else:\n return datetime.datetime.fromtimestamp(timeString)\n except ValueError:\n return None\n except TypeError:\n return None", "def validate_date(date_given, date_format=\"%Y-%m-%d\"):\n # FIXME: use datetime.datetime.strptime(date_given, \"%Y-%m-%d\")\n # after upgrading Python => 2.5\n return datetime.datetime(*(time.strptime(date_given, date_format)[0:6]))", "def get_datetime(time):\n year = int(time[0:4])\n month = int(time[5:7])\n day = int(time[8:10])\n hour = int(time[11:13])\n minute = int(time[14:16])\n second = int(time[17:19])\n return datetime(year, month, day, hour, minute, second)", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def date_minute(date):\n return date.minute", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def ensure_date(value: Union[Date, DateTime, str], **kwargs: int) -> Date:\n return ensure_datetime(value, **kwargs).date()", "def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)", "def parse_datetime(val):\n try: return maya.parse(val).datetime()\n except: return val", "def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)", "def convert_date_to_datetime(date_obj: date) -> datetime:\n # REF: https://stackoverflow.com/a/11619200\n assert isinstance(date_obj, date), \"Not a date object.\"\n # return the original value if the input is a datetime object\n if isinstance(date_obj, 
datetime):\n return date_obj\n return datetime.combine(date_obj, time())", "def datetime(self):\n return datetime(*tuple(self))", "def test_datetime_when_some_checks_have_no_date(self, mock_url_read):\n mock_url_read.return_value = \\\n '{\"value\": [{\"LastScan\": {\"ScanCompletedOn\": \"2016-12-14T00:01:30.737+01:00\", ' \\\n '\"Comment\": \"Attempt to perform scan on 2/13/2017 8:00:06 PM - No code changes were ' \\\n 'detected; No code changes were detected No code changes were detected\"}}]}'\n self.assertEqual(datetime.datetime(2017, 2, 13, 20, 0, 6), self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def get_tommorows_noon_time():\n dt = datetime.combine(date.today() + timedelta(days=1), datetime.min.time())\n return dt", "def normalize_date(__date, type='arrival'):\n\n if isinstance(__date, datetime.datetime):\n # If type is arrival pass RESERVATION_START_TIME as tup else RESERVATION_END_TIME as tup\n if type == 'arrival':\n tup = RESERVATION_START_TIME\n else:\n tup = RESERVATION_END_TIME\n\n __date = datetime.datetime(__date.year, __date.month, __date.day,\n tup[0], tup[1], tup[2])\n\n return __date\n return None", "def datetime_checkinput(year, month, day):\n try:\n datetime.datetime(year, month, day)\n except:\n raise Invaliddatetimeinput\n return 0", "def time_to_datetime(time):\n split_time = time.split(':')\n hour = int(split_time[0])\n minutes = int(split_time[1])\n now = dt.datetime.now()\n time_as_datetime = dt.datetime(now.year, now.month, now.day,\n hour=hour, minute=minutes)\n\n # Need to change the day to tommorow if time has already passed\n if time_as_datetime < now:\n day = now.day + 1\n time_as_datetime = dt.datetime(now.year, now.month, day,\n hour=hour, minute=minutes)\n\n return time_as_datetime", "def test_parse_valid_time_of_day(self):\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_of_day({\n 'Hour': 23,\n 'Minute': 59\n })\n\n self.assertIsInstance(res, TimeOfDay)\n\n self.assertEqual(res.hour, 23)\n self.assertEqual(res.minute, 59)\n\n res2 = sf_c.parse_time_of_day({\n 'Hour': 0,\n 'Minute': 0\n })\n\n self.assertIsInstance(res2, TimeOfDay)\n\n self.assertEqual(res2.hour, 0)\n self.assertEqual(res2.minute, 0)", "def clean_value(self, value):\n if not isinstance(value, datetime):\n try:\n value = dateutil.parser.parse(value)\n except ValueError:\n raise ValidationError(\n gettext('This timestamp is not a valid ISO 8601 '\n 'date/time'))\n\n if timezone.is_naive(value):\n try:\n value = timezone.make_aware(value,\n timezone.get_current_timezone())\n except AmbiguousTimeError:\n raise ValidationError(\n gettext('This timestamp needs a UTC offset to avoid '\n 'being ambiguous due to daylight savings time '\n 'changes'))\n\n return value", "def datetime_or_none(dt_str: str | None) -> dt.datetime | None:\n\n if not dt_str:\n return None\n\n str_format = \"%Y-%m-%dT%H:%M:%S\"\n dt_val = dt.datetime.strptime(dt_str, str_format)\n\n return adjust_icms_v1_datetime(dt_val)", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, 
pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())", "def test_make_datetime_aware(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # Calling make_datetime_aware() returns a timezone-aware datetime referring\n # to the moment from the naive_datetime_obj, in the appropriate time zone.\n naive_datetime_str = \"2018-01-01T20:00:00\"\n expected_datetime_obj = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n assert make_datetime_aware(naive_datetime_str) == expected_datetime_obj\n\n # Calling make_datetime_aware() for non-datetime strings returns None.\n dt_str = \"\"\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None\n dt_str = None\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None", "def test_non_input(self):\n from sosbeacon.utils import format_datetime\n\n encoded = format_datetime(None)\n self.assertEqual('', encoded)", "def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object", "def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)", "def datetime(when = None):\n if when is None:\n return xmlrpclib.DateTime(time.gmtime())\n else:\n return xmlrpclib.DateTime(time.gmtime(when))", "def test_long_not_configured(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '',\n }\n assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(\n '2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())\n assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(\n '2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())", "def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"", "def test_datetime_from(self):\n dt = sync.datetime_from('2012-09-09T00:00:00') # EDT\n self.assertEqual(2012, dt.year)\n self.assertEqual(9, dt.month)\n self.assertEqual(10, dt.day)\n self.assertEqual(3, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')\n\n dt = sync.datetime_from('2012-12-09T00:00:00') # EST\n self.assertEqual(2012, dt.year)\n self.assertEqual(12, dt.month)\n self.assertEqual(10, dt.day)\n self.assertEqual(4, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')", "def test_freeze_with_timezone_aware_datetime_in_non_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None\n assert utc_now == datetime.datetime(1970, 1, 1, 4)", "def test_datetime_on_missing_values(self, mock_url_read):\n mock_url_read.return_value = '{\"value\": []}'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is 
no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result", "def from_isodatetime(date_time):\n if not date_time:\n return None\n if date_time[:2]=='PT':\n if 'M' in date_time:\n dt = datetime.datetime.strptime(date_time, \"PT%HH%MM%SS\")\n else:\n dt = datetime.datetime.strptime(date_time, \"PT%H:%M:%S\")\n secs = (dt.hour*60+dt.minute)*60 + dt.second\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%MZ\") \n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").time()", "def test_parse_time_with_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assertEqual(parse_time(\"-0s\", now), now)\n self.assertEqual(\n parse_time(\"-1s\", now), datetime(2015, 1, 31, 23, 59, 59))\n self.assertEqual(\n parse_time(\"-1s\", now), datetime(2015, 1, 31, 23, 59, 59))\n self.assertEqual(\n parse_time(\"-2w\", now), datetime(2015, 1, 18, 0, 0, 0))\n self.assertEqual(\n parse_time(\"-2w\", datetime(2015, 1, 24, 10, 15, 25)),\n datetime(2015, 1, 10, 10, 15, 25))", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def asdatetime(self):\n tznaive = self.timezoneNaive()\n if tznaive:\n tzinfo = None\n else:\n tzinfo = _TZINFO[self._tz].tzinfo\n second = int(self._second)\n microsec = self.micros() % 1000000\n dt = datetime(self._year, self._month, self._day, self._hour,\n self._minute, second, microsec, tzinfo)\n return dt", "def new_datetime(year, month=None, day=None,\n hour=0, minute=0, second=0,\n microsecond=0):\n return datetime(year, month, day, hour, minute,\n second, microsecond, tzinfo=timezone)", "def _parse_datetime(value):\n return parse(value).replace(tzinfo=pytz.utc) if value else None", "def get_datetime_before_given_minutes(minutes):\n from datetime import datetime\n import datetime as dt\n date_obj_before_3min = datetime.now()- dt.timedelta(minutes=minutes)\n return date_obj_before_3min", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def test_bad_interval(self):\n # Intentionally set a small interval (3 min) to fail.\n interval = np.timedelta64(3, 'm')\n self.assertFalse(utils.check_timestamps(self.times, interval))", "def test_as_date(self):\n self.assertEqual(\n time_display.as_date(\n datetime(2020, 7, 31, 23, 59, 30, 357921)),\n '2020-07-31')", "def test_8_digit_date_detection(self):\n obj = awstats_reader.awstats_datetime('20091130')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDate))", "def _report_datetime(self, metric_source_id: str) -> DateTime:\n try:\n test_suites = self.__test_suites(metric_source_id)\n except UrlOpener.url_open_exceptions:\n return 
datetime.datetime.min\n except xml.etree.cElementTree.ParseError:\n return datetime.datetime.min\n if test_suites:\n timestamps = [test_suite.get('started-at') for test_suite in test_suites]\n date_times = [utils.parse_iso_datetime_local_naive(timestamp) for timestamp in timestamps if timestamp]\n if date_times:\n return min(date_times)\n logging.warning(\"Couldn't find timestamps in test suites in: %s\", metric_source_id)\n return datetime.datetime.min\n logging.warning(\"Couldn't find test suites in: %s\", metric_source_id)\n return datetime.datetime.min", "def test_datetime_missing(self, mock_url_read):\n mock_url_read.return_value = '{\"value\": [{\"LastScan\": {}}]}'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def compute_pirep_valid(self, hour, minute):\n res = self.utcnow.replace(\n hour=hour, minute=minute, second=0, microsecond=0\n )\n if hour > self.utcnow.hour:\n res -= datetime.timedelta(hours=24)\n return res" ]
[ "0.694124", "0.6576329", "0.6497878", "0.63341737", "0.6285057", "0.60327333", "0.59235966", "0.5857965", "0.58377993", "0.5830965", "0.57997185", "0.57975805", "0.575214", "0.5735291", "0.5732054", "0.57108104", "0.5695891", "0.5693779", "0.5690543", "0.5684678", "0.56797", "0.56688464", "0.5661843", "0.5658016", "0.56371856", "0.5613566", "0.5609956", "0.55677384", "0.5552031", "0.5547457", "0.55365545", "0.5532943", "0.552832", "0.5522084", "0.54976773", "0.54953045", "0.54916245", "0.5489397", "0.5485955", "0.54767346", "0.54628026", "0.54569066", "0.54569066", "0.5455371", "0.5453056", "0.54396236", "0.542868", "0.54267627", "0.54217845", "0.54147124", "0.54086375", "0.53688514", "0.53636694", "0.5362121", "0.5354563", "0.5354373", "0.5334488", "0.53273684", "0.5318205", "0.5315857", "0.5304137", "0.53003895", "0.52949876", "0.52941996", "0.5293093", "0.5288111", "0.5286942", "0.5286587", "0.52854073", "0.52597535", "0.52556795", "0.5250486", "0.5246026", "0.52454203", "0.5243514", "0.524283", "0.5242567", "0.524245", "0.523154", "0.5224081", "0.52210474", "0.52167434", "0.52140194", "0.5201104", "0.5189943", "0.5181887", "0.5176781", "0.51647854", "0.51590395", "0.51506484", "0.51486146", "0.51255256", "0.51210684", "0.51168114", "0.5112042", "0.511061", "0.51038796", "0.50975955", "0.5086653", "0.50856364" ]
0.64184
3
Ensure a date with hours but no minutes is returned as a datetime.
def test_date_with_zero_minutes(self): from sosbeacon.utils import format_datetime date = datetime(year=2012, month=8, day=30, hour=19, minute=0) encoded = format_datetime(date) self.assertEqual('08/30/12 19:00', encoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_date_with_zero_hours(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=0, minute=13)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 00:13', encoded)", "def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )", "def datetime_object(__date):\n if isinstance(__date, datetime.datetime):\n return datetime.datetime(__date.year, __date.month, __date.day, __date.hour, __date.minute, __date.second)\n return None", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def test_instantiation_fail_on_naive_datetime(self):\n with self.assertRaises(ValueError):\n Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10), # Missing timezone\n 'Some description')", "def datetime(self):\n\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + timedelta(hours=time)\n return d\n except:\n return", "def test_date_change_fails_on_naive_datetime(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n with self.assertRaises(ValueError):\n event.date = datetime(2020, 10, 10, 12, 10)", "def test_valid_turnaround_hour(self):\n valid_datetime = datetime.datetime(2020, 8, 21, 13, 2)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, 0)\n self.assertRaises(ValueError, calculate_due_date, valid_datetime, -1)\n calculate_due_date(valid_datetime, 1)", "def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. 
Its type is <{1}>\".format(date, type(date)))", "def datetime(self):\n time = self.time()\n if time is None or time < 0 or time >= 24:\n time = 0\n\n try:\n d = datetime(self.year(), self.month(), self.day()) + \\\n timedelta(hours=time)\n return d\n except:\n return", "def test_parse_no_timezone_no_strict():\n d = iso8601.parse_datetime(\"2007-01-01T08:00:00\", strict=False)\n assert d.year == 2007\n assert d.month == 1\n assert d.day == 1\n assert d.hour == 8\n assert d.minute == 0\n assert d.second == 0\n assert d.microsecond == 0\n assert d.tzinfo == iso8601.UTC", "def validate_datetime(self, current_date):\n valid_minute = None\n valid_hour = None\n MIN_HOUR = 0\n MAX_HOUR = 23\n MIN_MINUTE = 0\n MAX_MINUTE = 59\n TIME_SEPARATOR = u':'\n\n hour, minute = current_date.split(TIME_SEPARATOR)\n\n try:\n if ((MIN_HOUR <= int(hour) <= MAX_HOUR) and\n (MIN_MINUTE <= int(minute) <= MAX_MINUTE)):\n valid_minute = int(minute)\n valid_hour = int(hour)\n except ValueError as e:\n logging.error(u'Given current time is invalid %s', e)\n\n valid_datetime = {u'hour': valid_hour, u'minute': valid_minute}\n\n return valid_datetime", "def parse_time(time_input, *, force_datetime=False, allow_undefined=False, **kwargs):\n\n if allow_undefined and time_input in [None, '..']:\n return None\n\n if isinstance(time_input, dt.date):\n if force_datetime and not isinstance(time_input, dt.datetime):\n return date_to_datetime(time_input)\n\n if kwargs.get('ignoretz') and isinstance(time_input, dt.datetime):\n return time_input.replace(tzinfo=None)\n\n return time_input\n\n time = dateutil.parser.parse(time_input, **kwargs)\n if force_datetime or len(time_input) > 10: # This check is not very accurate but it works for iso format\n return time\n return time.date()", "def omniscient_datetime(*args):\n d = original_datetime(*args)\n if settings.USE_TZ:\n d = timezone.make_aware(d, timezone.utc)\n return d", "def test_none_handling(self):\r\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def test_none_handling(self):\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def naive(self):\n return self.datetime.replace(tzinfo=None)", "def test_event_start_datetimes_none_with_incomplete_data(self):\n e = Event(title=self.TITLE, creator=self.USER,\n start_date=self.START.date())\n self.assertIsNone(e.start_datetime())\n\n f = Event(title=self.TITLE, creator=self.USER,\n start_time=self.START.time())\n self.assertIsNone(f.start_datetime())", "def make_datetime(value):\n if value:\n return value\n return None", "def make_datetime():\n date = input(\"Please give a date as month/day/year, (month ex jan feb): \")\n time = input(\"Please give a time in hour:minute (AM/PM): \")\n the_datetime = date + time\n try:\n our_datetime = datetime.datetime.strptime(the_datetime, \"%b/%d/%Y%I:%M %p\")\n return our_datetime\n except ValueError:\n return make_datetime()", "def date_to_datetime(date, time=None):\n if time is None:\n time = dt.datetime.min.time()\n return dt.datetime.combine(date, time)", "def test_date_with_time(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=7, minute=13)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 07:13', encoded)", "def make_datetime_from_dicom_date(date: str, time: str = None) -> Optional[datetime]:\n try:\n return datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n hour=int(time[:2]),\n minute=int(time[2:4]),\n second=int(time[4:6])\n )\n except (ValueError, TypeError):\n try:\n return 
datetime(\n year=int(date[:4]),\n month=int(date[4:6]),\n day=int(date[6:8]),\n )\n except (ValueError, TypeError):\n return None", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def is_datetime(self) -> bool:\n return False", "def ensure_datetime(value: Union[Date, DateTime, str], **kwargs: int) -> DateTime:\n ## Check the type of the value and act accordingly.\n if isinstance(value, DateTime):\n ## It is a datetime instance. Nothing to be done. Just return with replacement:\n return value.replace(**kwargs) # type: ignore\n elif isinstance(value, Date):\n ## It is a date instance. Set to morning and return with replacement:\n return DateTime.combine(value, DateTime.min.time()).replace(**kwargs) # type: ignore\n elif isinstance(value, str):\n ## We have a string. Attempt to parse and return with replacement:\n try:\n return parse(value).replace(**kwargs) # type: ignore\n except ParserError:\n raise ValueError(\"Can not parse value into a date/time object: {}\".format(value))\n\n ## We have a problem here: Don't know how to convert other\n ## object. Raise a value error:\n raise ValueError(\"Don't know how to convert value to date/time object: {}\".format(value))", "def test_datetime_with_naive_duedate_only_fails(self):\n # setup\n specify_wd = self.project.activity('Specify wheel diameter')\n\n # save old values\n old_start, old_due = datetime.strptime(specify_wd._json_data.get('start_date'), ISOFORMAT), \\\n datetime.strptime(specify_wd._json_data.get('due_date'), ISOFORMAT)\n naive_duedate = datetime(2017, 6, 5, 5, 0, 0)\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=naive_duedate)\n\n # teardown\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=old_due)", "def get_datetime_from_time(value: datetime | time) -> datetime:\n if isinstance(value, time):\n value = datetime.combine(dt_util.now().date(), value, dt_util.DEFAULT_TIME_ZONE)\n if isinstance(value, datetime):\n value = value.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE)\n if value > dt_util.now():\n raise ValidationError(\"Time cannot be in the future.\")\n return value", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def test_14_digit_datetime_detection(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))", "def force_to_datetime(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return datetime.datetime.combine(val, datetime.time())\n elif isinstance(val, str):\n return string_to_datetime(val)\n else:\n raise ValueError(\"object must be date or datetime!\")", "def fake_date_without_day(value):\n return date(year=value[0], month=value[1], day=1)", "def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)", "def from_isodatetime(date_time: str) -> Optional[Union[datetime.datetime,\n datetime.timedelta,\n datetime.time]]:\n if not date_time:\n return 
None\n utc = datetime.timezone(datetime.timedelta(0))\n if date_time[:2] == 'PT':\n match = duration_re.match(date_time)\n if not match:\n raise ValueError(date_time)\n hours, minutes, seconds = match.group(\n 'hours'), match.group('minutes'), match.group('seconds')\n secs: float = 0\n if hours is not None:\n secs += int(match.group('hours')) * 3600\n if minutes is not None:\n secs += int(match.group('minutes')) * 60\n if seconds is not None:\n secs += float(match.group('seconds'))\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(\n date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\").replace(tzinfo=utc)\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\").replace(tzinfo=utc)\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%MZ\").replace(tzinfo=utc)\n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").replace(tzinfo=utc).time()", "def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)", "def correct_datetime(record_datetime):\n assert record_datetime.date() == datetime.now(timezone.utc).date()", "def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)", "def test_14_digit_datetime(self):\n obj = awstats_reader.awstats_datetime('20091130165230')\n dt = datetime.datetime(2009, 11, 30, 16, 52, 30)\n self.assertEqual(obj, dt)", "def skip_or_run_datetime_test(func):\n\n return skip_or_run_test_pcall_require(func, 'datetime',\n 'does not support datetime type')", "def _datetime(year, month, day, hour, minute, second):\n try:\n return datetime.datetime(\n year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc\n )\n except ValueError:\n invalid_datetime = (\n f\"{year:04d}-{month:02d}-{day:02d} \"\n f\"{hour:02d}:{minute:02d}:{second:02d}\"\n )\n raise ftputil.error.ParserError(\n \"invalid datetime {0!r}\".format(invalid_datetime)\n )", "def get_date():\n return datetime(2000, 1, 1, 0, 0, 0, FLOOD_TIMEOUT+1)", "def convert_date_to_datetime(date):\n return datetime.combine(date, dtime()) if date else None", "def datetime_or_none(dt_str: str | None) -> dt.datetime | None:\n\n if not dt_str:\n return None\n\n str_format = \"%Y-%m-%dT%H:%M:%S\"\n dt_val = dt.datetime.strptime(dt_str, str_format)\n\n return adjust_icms_v1_datetime(dt_val)", "def test_non_input(self):\n from sosbeacon.utils import format_datetime\n\n encoded = format_datetime(None)\n self.assertEqual('', encoded)", "def test_year_year_zero_datetime_parse(self):\n obj = awstats_reader.awstats_datetime('0')\n self.assertEqual(obj,datetime.datetime(1,1,1))", "def make_datetime_obj(date, time):\n\n conv_date = datetime.strptime(date, \"%Y-%m-%d\").date()\n conv_time = datetime.strptime(time, \"%H:%M\").time()\n\n return datetime.combine(conv_date, conv_time)", "def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time", "def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so 
we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result", "def create_naive_datetime():\n # datetime(year, month, day, hour, minute, second, microsecond, tzinfo)\n dt = datetime.datetime(2017, 1, 1, 0, 0, 0)\n print(\"year: \" + str(dt.year))\n print(\"second: \" + str(dt.second))", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True", "def datetime(self):\n return datetime.datetime(self.year, self.month, self.day,\n self.hour, self.min, self.sec)", "def clean_value(self, value):\n if not isinstance(value, datetime):\n try:\n value = dateutil.parser.parse(value)\n except ValueError:\n raise ValidationError(\n gettext('This timestamp is not a valid ISO 8601 '\n 'date/time'))\n\n if timezone.is_naive(value):\n try:\n value = timezone.make_aware(value,\n timezone.get_current_timezone())\n except AmbiguousTimeError:\n raise ValidationError(\n gettext('This timestamp needs a UTC offset to avoid '\n 'being ambiguous due to daylight savings time '\n 'changes'))\n\n return value", "def ensure_date(value: Union[Date, DateTime, str], **kwargs: int) -> Date:\n return ensure_datetime(value, **kwargs).date()", "def test_datetime_when_some_checks_have_no_date(self, mock_url_read):\n mock_url_read.return_value = \\\n '{\"value\": [{\"LastScan\": {\"ScanCompletedOn\": \"2016-12-14T00:01:30.737+01:00\", ' \\\n '\"Comment\": \"Attempt to perform scan on 2/13/2017 8:00:06 PM - No code changes were ' \\\n 'detected; No code changes were detected No code changes were detected\"}}]}'\n self.assertEqual(datetime.datetime(2017, 2, 13, 20, 0, 6), self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def test_as_datetime(self):\n self.assertEqual(\n time_display.as_datetime(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def test_prepare_datetime(time):\n assert SSLLabsClient().prepare_datetime(time) == \"2018-03-17\"", "def datetime_checkinput(year, month, day):\n try:\n datetime.datetime(year, month, day)\n except:\n raise Invaliddatetimeinput\n return 0", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def test_parse_valid_time_of_day(self):\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_of_day({\n 'Hour': 23,\n 'Minute': 59\n })\n\n self.assertIsInstance(res, TimeOfDay)\n\n self.assertEqual(res.hour, 23)\n self.assertEqual(res.minute, 59)\n\n res2 = sf_c.parse_time_of_day({\n 'Hour': 0,\n 'Minute': 0\n })\n\n self.assertIsInstance(res2, TimeOfDay)\n\n self.assertEqual(res2.hour, 0)\n self.assertEqual(res2.minute, 0)", "def validate_date(date_given, date_format=\"%Y-%m-%d\"):\n # FIXME: use datetime.datetime.strptime(date_given, \"%Y-%m-%d\")\n # after upgrading Python => 2.5\n return datetime.datetime(*(time.strptime(date_given, date_format)[0:6]))", "def test_datetime_from(self):\n dt = sync.datetime_from('2012-09-09T00:00:00') # EDT\n self.assertEqual(2012, dt.year)\n self.assertEqual(9, dt.month)\n 
self.assertEqual(10, dt.day)\n self.assertEqual(3, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')\n\n dt = sync.datetime_from('2012-12-09T00:00:00') # EST\n self.assertEqual(2012, dt.year)\n self.assertEqual(12, dt.month)\n self.assertEqual(10, dt.day)\n self.assertEqual(4, dt.hour)\n self.assertEqual(59, dt.minute)\n self.assertEqual(59, dt.second)\n self.assertEqual(dt.tzname(), 'UTC')", "def datetime(when = None):\n if when is None:\n return xmlrpclib.DateTime(time.gmtime())\n else:\n return xmlrpclib.DateTime(time.gmtime(when))", "def test_freeze_with_timezone_aware_datetime_in_non_utc():\n utc_now = datetime.datetime.utcnow()\n assert utc_now.tzinfo is None\n assert utc_now == datetime.datetime(1970, 1, 1, 4)", "def __handleDateAttribute(self, timeString):\n try:\n if len(str(timeString)) == 13:\n return datetime.datetime.fromtimestamp(timeString / 1000)\n else:\n return datetime.datetime.fromtimestamp(timeString)\n except ValueError:\n return None\n except TypeError:\n return None", "def get_datetime(time):\n year = int(time[0:4])\n month = int(time[5:7])\n day = int(time[8:10])\n hour = int(time[11:13])\n minute = int(time[14:16])\n second = int(time[17:19])\n return datetime(year, month, day, hour, minute, second)", "def test_make_datetime_aware(settings):\n # Set the TIME_ZONE in the settings.\n settings.TIME_ZONE = \"America/New_York\"\n\n # Calling make_datetime_aware() returns a timezone-aware datetime referring\n # to the moment from the naive_datetime_obj, in the appropriate time zone.\n naive_datetime_str = \"2018-01-01T20:00:00\"\n expected_datetime_obj = make_aware(\n datetime(year=2018, month=1, day=1, hour=20, minute=0, second=0),\n timezone=pytz.timezone(\"America/New_York\"),\n )\n assert make_datetime_aware(naive_datetime_str) == expected_datetime_obj\n\n # Calling make_datetime_aware() for non-datetime strings returns None.\n dt_str = \"\"\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None\n dt_str = None\n aware_dt = make_datetime_aware(dt_str)\n assert aware_dt == None", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())", "def test_gen_iso_datetime_str(self):\n\n est = pytz.timezone(\"EST\")\n some_date = datetime.datetime(\n year=1985, month=11, day=15,\n hour=6, minute=0,\n tzinfo=est)\n\n # Generate an ISO datetime string, and parse it. 
This will convert it\n # from EST to UTC.\n parsed_dtime = parse_datetime(gen_iso_datetime_str(some_date))\n # EST is -5, so the hour should now be 11.\n self.assertEqual(parsed_dtime.hour, 11)\n # tzinfo will be UTC, since we converted it upon parsing.\n self.assertIs(parsed_dtime.tzinfo, UTC_TZINFO)", "def time_to_datetime(time):\n split_time = time.split(':')\n hour = int(split_time[0])\n minutes = int(split_time[1])\n now = dt.datetime.now()\n time_as_datetime = dt.datetime(now.year, now.month, now.day,\n hour=hour, minute=minutes)\n\n # Need to change the day to tommorow if time has already passed\n if time_as_datetime < now:\n day = now.day + 1\n time_as_datetime = dt.datetime(now.year, now.month, day,\n hour=hour, minute=minutes)\n\n return time_as_datetime", "def from_isodatetime(date_time):\n if not date_time:\n return None\n if date_time[:2]=='PT':\n if 'M' in date_time:\n dt = datetime.datetime.strptime(date_time, \"PT%HH%MM%SS\")\n else:\n dt = datetime.datetime.strptime(date_time, \"PT%H:%M:%S\")\n secs = (dt.hour*60+dt.minute)*60 + dt.second\n return datetime.timedelta(seconds=secs)\n if 'T' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n pass\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%dT%H:%MZ\") \n if not 'Z' in date_time:\n try:\n return datetime.datetime.strptime(date_time, \"%Y-%m-%d\")\n except ValueError:\n return datetime.datetime.strptime(date_time, \"%d/%m/%Y\")\n return datetime.datetime.strptime(date_time, \"%H:%M:%SZ\").time()", "def convert_date_to_datetime(date_obj: date) -> datetime:\n # REF: https://stackoverflow.com/a/11619200\n assert isinstance(date_obj, date), \"Not a date object.\"\n # return the original value if the input is a datetime object\n if isinstance(date_obj, datetime):\n return date_obj\n return datetime.combine(date_obj, time())", "def convert_date(date):\n date = get_nummeric_only(date) \n \n \n if len(date) == 8:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n \n date_time = dt.datetime(year,month,day)\n \n return date_time\n \n if len(date) == 12 or len(date) == 14:\n\n year = int(date[:4]) \n month = int(date[4:6])\n day = int(date[6:8])\n hour = int(date[8:10])\n minute = int(date[10:12])\n \n date_time = dt.datetime(year,month,day, hour, minute)\n \n return date_time\n else:\n return 0", "def _parse_datetime(value):\n return parse(value).replace(tzinfo=pytz.utc) if value else None", "def test_8_digit_date(self):\n obj = awstats_reader.awstats_datetime('20091130')\n dt = datetime.date(2009, 11, 30)\n self.assertEqual(obj, dt)", "def normalize_date(__date, type='arrival'):\n\n if isinstance(__date, datetime.datetime):\n # If type is arrival pass RESERVATION_START_TIME as tup else RESERVATION_END_TIME as tup\n if type == 'arrival':\n tup = RESERVATION_START_TIME\n else:\n tup = RESERVATION_END_TIME\n\n __date = datetime.datetime(__date.year, __date.month, __date.day,\n tup[0], tup[1], tup[2])\n\n return __date\n return None", "def datetime(self):\n return datetime(*tuple(self))", "def test_as_date(self):\n self.assertEqual(\n time_display.as_date(\n datetime(2020, 7, 31, 23, 59, 30, 357921)),\n '2020-07-31')", "def parse_datetime(val):\n try: return maya.parse(val).datetime()\n except: return val", "def test_parse_none_time_of_day(self):\n\n res = sf_c.parse_time_of_day(None)\n self.assertIs(res, None)", "def 
test_convert_to_eod_datetime(self):\n assert convert_to_eod_datetime(date(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1)) == datetime(\n 2020, 1, 1, 23, 59, 59, 999999\n )\n assert convert_to_eod_datetime(datetime(2020, 1, 1, 2, 30)) == datetime(\n 2020, 1, 1, 2, 30\n )\n assert convert_to_eod_datetime(\"foo\") == \"foo\"", "def mocked_time():\n return datetime.datetime(2017, 10, 27, 22, 54, 56, 566179)", "def test_datetime_on_missing_values(self, mock_url_read):\n mock_url_read.return_value = '{\"value\": []}'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def test_8_digit_date_detection(self):\n obj = awstats_reader.awstats_datetime('20091130')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDate))", "def generate_datetime(self, hour):\n minute = randint(0, self.MINUTES_IN_HOUR - 1)\n second = randint(0, self.SECONDS_IN_MINUTE - 1)\n\n return datetime.strptime(str(self.date.year) + \"-\"\n + str(self.date.month) + \"-\"\n + str(self.date.day) + \" \"\n + str(hour) + \":\"\n + str(minute) + \":\"\n + str(second), '%Y-%m-%d %H:%M:%S')", "def asdatetime(self):\n tznaive = self.timezoneNaive()\n if tznaive:\n tzinfo = None\n else:\n tzinfo = _TZINFO[self._tz].tzinfo\n second = int(self._second)\n microsec = self.micros() % 1000000\n dt = datetime(self._year, self._month, self._day, self._hour,\n self._minute, second, microsec, tzinfo)\n return dt", "def gen_date_with_mins(date):\n datetime_info = date.split(', ')\n time = convert_12_to_24(datetime_info[0])\n month, day = datetime_info[1].split(' ')\n year = datetime_info[2]\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n time = datetime.time(time.hour, time.minute)\n return date, time", "def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object", "def insure_date(d):\n if isinstance(d, BeautifulDate):\n return date(year=d.year, month=d.month, day=d.day)\n else:\n return d", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def test_opt_datetimeMissingOpenQuote(self):\n line = b'not \"'\n dt, remainder = self.server.opt_datetime(line)\n self.assertIsNone(dt)\n self.assertEqual(remainder, line)", "def test_003_not_enough_datetimes() -> None:\n df = generate_test_data()\n df = df.head(2)\n skim(df)", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def test_missing_dates(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'full_project_name': 'test_project',\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)", "def test_long_not_configured(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 
'longdatetimeformat': '',\n }\n assert (dt.datetime(2017, 1, 1), True) == guessdatetimefstr(\n '2017-1-1'.split(), locale=locale, default_day=dt.datetime.today())\n assert (dt.datetime(2017, 1, 1, 16, 30), False) == guessdatetimefstr(\n '2017-1-1 16:30'.split(), locale=locale, default_day=dt.datetime.today())", "def test_non_std_from_json(self):\r\n now = datetime.datetime.now(UTC())\r\n delta = now - datetime.datetime.fromtimestamp(0, UTC())\r\n self.assertEqual(DateTest.date.from_json(delta.total_seconds() * 1000),\r\n now)\r\n yesterday = datetime.datetime.now(UTC()) - datetime.timedelta(days=-1)\r\n self.assertEqual(DateTest.date.from_json(yesterday), yesterday)", "def test_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__jenkins.datetime(*('job',)))", "def parse_http_date(date_str: Optional[str]) -> Optional[datetime.datetime]:\n if date_str is not None:\n timetuple = parsedate(date_str)\n if timetuple is not None:\n with suppress(ValueError):\n return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)\n return None" ]
[ "0.68381727", "0.6782348", "0.6598675", "0.6525046", "0.6244653", "0.6082294", "0.6023994", "0.6016812", "0.6003925", "0.59741217", "0.5968216", "0.5952586", "0.59485435", "0.5870531", "0.586283", "0.5861185", "0.58541614", "0.58484566", "0.58302844", "0.5829411", "0.5792652", "0.57873815", "0.5779838", "0.57771826", "0.57753366", "0.57640463", "0.57601213", "0.5751714", "0.5727928", "0.57147384", "0.5705568", "0.57002425", "0.5689323", "0.5671361", "0.56711596", "0.56516415", "0.5648376", "0.564055", "0.56393015", "0.5637017", "0.5630173", "0.5612786", "0.5578744", "0.55560666", "0.55421716", "0.55418473", "0.55305374", "0.5527414", "0.5511361", "0.55022234", "0.5495721", "0.54867303", "0.54867303", "0.5478", "0.54671496", "0.54653186", "0.54580337", "0.545025", "0.54438716", "0.5436749", "0.54366887", "0.5427286", "0.54263115", "0.54250556", "0.5419373", "0.54193103", "0.54040277", "0.5393681", "0.5391813", "0.53777164", "0.5368631", "0.53594345", "0.535749", "0.535407", "0.53489107", "0.53460616", "0.5345644", "0.53432727", "0.53251684", "0.53012276", "0.52997154", "0.52980906", "0.52979326", "0.5293398", "0.5289554", "0.5289059", "0.5288311", "0.5269124", "0.5267468", "0.5267312", "0.52652055", "0.5254049", "0.5238589", "0.5236481", "0.5230343", "0.5224986", "0.5211565", "0.5206589", "0.5200262", "0.51879364" ]
0.6744739
2
Ensure a missing date returns the empty string.
def test_non_input(self): from sosbeacon.utils import format_datetime encoded = format_datetime(None) self.assertEqual('', encoded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_date(self, arg, line_number=0):\n try:\n dt = datetime.strptime(arg, \"%d %b %Y\")\n return dt\n except ValueError:\n raise ValueError(f\"US42 - Illegitimate date of {arg}. GEDCOM line: {line_number}\")\n else:\n return 'NA'", "def test_bad_date_1(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=2017-01-01\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)", "def _cleanup_date(self, date):\n if (not date or\n not isinstance(date, str) or\n 'N/A' in date):\n return None\n\n date, time = date.split()\n day, month, year = date.split('.')\n hour, minute = time.split(':')\n year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]\n\n # For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)\n if (69 <= year < 100):\n year += 1900\n elif (0 <= year < 69):\n year += 2000\n\n return datetime.datetime(year, month, day, hour, minute)", "def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date", "def error_imaginary_date(user: discord.User, date_arg: str) -> str:\n return (\n f\"{user.mention}, you might need to check you're calendar!\"\n f\" '{date_arg}' doesn't exist!\"\n )", "def format_date(value):\n try:\n return value.strftime('%d/%m/%Y')\n except: \n return \"\"", "def test_hotshot_check_date_error(self):\n try:\n check_date('N/A', 'N/A', '20.11.2015')\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def test_bad_date_2(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=2020-01-01\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)", "def validate_date_field(self, field: dict, value: str):\n if field.get(\"required\") and value.strip() == \"\":\n return f\"{field.get('label')} is required!\"\n\n try:\n datetime.datetime.strptime(value, self.config.get(\"date_format\"))\n except ValueError:\n return f\"{field.get('label')} should be a date with the format provided in \" \\\n f\"config {self.config.get('date_format')}\"\n\n return \"\"", "def test_bad_date_3(self):\n result = self.client.get(\"/search?origin=ORD%2C+Chicago+IL&destination=DFW%2C+Dallas+TX&date=\")\n self.assertNotIn('<meter value=\"70\"', result.data)\n self.assertIn('enter a valid date', result.data)", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def get_date_or_none(date_str, date_format='%Y-%m-%d'):\n try:\n return datetime.strptime(date_str, date_format).date()\n except (ValueError, TypeError):\n return None", "def verify_date_field(field, data: str) -> str:\n if field in ['started_at', 'ended_at', 'created_at', 'performed_at', 'issued_at', 'expires_at']:\n content = string_to_date(data)\n else:\n content = data\n\n return content", "def clean_date(self):\r\n from datetime import 
datetime\r\n\r\n date = self.cleaned_data[\"date\"]\r\n if date < datetime.now():\r\n self.add_error(\"date\", \"You cannot add a date for the past.\")\r\n return date", "def _fill_date(self):\n if not self.date['year']:\n self.date['year'] = self.DEFAULT_DATE['year']\n if not self.date['month']:\n self.date['month'] = self.DEFAULT_DATE['month']\n if not self.date['day']:\n self.date['day'] = self.DEFAULT_DATE['day']", "def get_date(text=\"\"):\n clear()\n date = input(\"Enter {}date (Format:YYYY-MM-DD): \".format(text))\n try:\n datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except ValueError:\n input(\"Please enter date in this format: YYYY-MM-DD.\"\n \" Press enter to continue.\")\n return get_date()\n else:\n return date", "def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def validate(self):\n\n if not (self.from_currency and self.to_currency):\n return \"Please you should provide two currencies\"\n if not self.date:\n self.date = \"latest\"\n else:\n try:\n datetime.strptime(self.date, \"%Y-%m-%d\")\n except ValueError as err:\n return str(err)", "def test_validate_date_entry_returns_correct_outOfBounds_if_future(self):\n date_string = \"3018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"dates in the future are not permitted\"\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def test_date_partial_only(self):\n term, rmd = util.parse_date(\"1902\")\n ym = util.parse_date_partial(term)\n self.assertEqual(ym, \"2019-02\")\n self.assertEqual(rmd, \"\", \"Should have no remainder\")", "def test_out_of_range(self):\n term, rmd = util.parse_date(\"0699\")\n self.assertIsNone(util.parse_date_partial(term))", "def clean_date_firstrade(datestr):\n return datetime.datetime.strptime(datestr, '%m/%d/%Y').strftime('%Y-%m-%d')", "def current_valid_date(self):\r\n return datetime.datetime.now().strftime('%Y-%m-%d')", "def _set_dt_string(self):\n if self.period is Period.WEEKLY:\n self._set_date_weekly()\n try:\n self._dt_string = self._get_datetime_or_error().strftime(self._dt_format)\n except ValueError:\n self._dt_string = \"\"\n raise InvalidDateError(detail={\n \"message\": \"Invalid Date Provided\",\n \"period\": self.period.value,\n \"date\": self._given_date\n })", "def test_validate_date_entry_returns_correct_outOfBounds_if_past(self):\n date_string = \"1899-12-12\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_string = \"dates before {} are not permitted\"\n date_fmt = \"%Y-%m-%d\"\n earliest_date = self.menu.OPTIONS['earliest allowed date']\n earliest_date_string = earliest_date.strftime(date_fmt)\n\n error_text = error_string.format(earliest_date_string)\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def check_for_date(date_str):\r\n try:\r\n if rex.match(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", str(date_str)) is None:\r\n raise sqlErr(\"Not a Date!\")\r\n except Exception as e:\r\n raise e", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 
'month', 'day'] if key in self.date)\n )", "def get_date_if_not_none(element):\n if element is None:\n text = None\n else:\n text = element.attrs.get('title')\n return text", "def check_date(date):\n\timport datetime\n\tif date < datetime.date.today():\n\t\treturn date\n\telse:\n\t\treturn datetime.date.today()", "def test_unique_for_date_with_nullable_date(self):\n FlexibleDatePost.objects.create(\n title=\"Django 1.0 is released\",\n slug=\"Django 1.0\",\n subtitle=\"Finally\",\n posted=datetime.date(2008, 9, 3),\n )\n p = FlexibleDatePost(title=\"Django 1.0 is released\")\n p.full_clean()\n\n p = FlexibleDatePost(slug=\"Django 1.0\")\n p.full_clean()\n\n p = FlexibleDatePost(subtitle=\"Finally\")\n p.full_clean()", "def valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n print(\"Error: Not a valid date: '{0}'.\".format(s))\n\n return", "def test_bad_base_date(self):\n self.base_date = 'bad'\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 104)", "def process_datetime(a_date: datetime) -> str:\n\n return str(a_date.date()) if a_date else Presenter.DEFAULT", "def date():\r\n try:\r\n curr_date = datetime.datetime.now().strftime(\"%b %d %Y\")\r\n except Exception as e:\r\n print(e)\r\n curr_date = False\r\n return curr_date", "def knowledge_date_valid(record):\n today = datetime.now(timezone.utc).date().strftime(\"%Y-%m-%d\")\n gen_date = record['knowledge_date'].strftime(\"%Y-%m-%d\")\n assert gen_date == today", "def _check_date_not_in_future(self, date):\n if date is None:\n pass\n else:\n assert (\n date <= datetime.datetime.now()\n ), \"Provided date cannot be in the future\"", "def date_or_none(date_str: str | None | dt.date | dt.datetime) -> dt.date | None:\n\n if not date_str:\n return None\n\n if isinstance(date_str, dt.datetime):\n return date_str.date()\n\n if isinstance(date_str, dt.date):\n return date_str\n\n if \" \" in date_str and len(date_str) > 10:\n return dt.datetime.strptime(date_str, \"%d %B %Y\").date()\n\n p_date_str = date_str.replace(\"/\", \"-\").replace(\".\", \"-\")\n date_split = p_date_str.split(\"-\")\n\n if len(date_split) > 3 or len(date_split[-1]) > 4:\n raise ValidationError(f\"Date {date_str} not in parsable format\")\n\n if len(date_split[0]) == 4:\n date_format = \"%Y-%m-%d\"\n elif len(date_split[-1]) == 4:\n date_format = \"%d-%m-%Y\"\n else:\n date_format = \"%d-%m-%y\"\n\n return dt.datetime.strptime(p_date_str, date_format).date()", "def info_date_unknown(out_datefirst: OutputCommonData = OutputCommonData(\"cwb.datefirst\"),\n out_datelast: OutputCommonData = OutputCommonData(\"cwb.datelast\")):\n logger.info(\"No date information found in corpus\")\n\n # Write datefirst and datelast files\n out_datefirst.write(\"\")\n out_datelast.write(\"\")", "def date_format(date) -> str:\n if date is not None:\n str_ = date.strftime(DATE_FMT).upper()\n else:\n str_ = \" \"\n return str_", "def test_missing_dates(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'full_project_name': 'test_project',\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)", "def test_07_no_future_records(self):\n bad_date = timezone.now() + timedelta(days=1)\n record = SwimRecord(record_date=bad_date)\n try:\n record.full_clean()\n except ValidationError as e:\n self.assertTrue(\"Can't set record 
in the future.\" in e.message_dict['record_date'])", "def enter_date():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'date': ''}\n\n while not valid_data:\n input_data['date'] = get_input(\"Date of the task\" + \"\\n\" + \"Please use DD/MM/YYYY format: \")\n if re.match('\\d{2}/\\d{2}/\\d{4}', input_data['date']):\n try:\n datetime.datetime.strptime(input_data['date'], '%d/%m/%Y')\n except ValueError:\n clean_scr()\n get_input(\"Enter a valid date. Press enter to try again.\")\n else:\n valid_data = True\n clean_scr()\n\n return input_data['date']", "def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)", "def remove_futuredate(date_field, date_format):\r\n newdate = check_dateformat(date_field, date_format)\r\n if not newdate:\r\n raise SIDException('Invalid Date')", "def must_be_valid_date_in_ddmmyyyy(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return pd.isna(get_date_ddmmyyyy(cell))", "def test_opt_datetimeMissingOpenQuote(self):\n line = b'not \"'\n dt, remainder = self.server.opt_datetime(line)\n self.assertIsNone(dt)\n self.assertEqual(remainder, line)", "def error_future_date(user: discord.User, date_arg: str) -> str:\n return (\n f\"{user.mention}, are you some sort of time traveler!?\"\n f\" '{date_arg}' hasn't happened yet!\"\n )", "def _sanitize_date(self, datestr):\n nums = [int(x) for x in datestr.split('/')]\n padded = [\"{:0>2}\".format(x) for x in nums]\n return \"/\".join(padded)", "def fake_date_without_day(value):\n return date(year=value[0], month=value[1], day=1)", "def test_invalid_beginning_of_year(self):\n year, month, day, error = clean_year_month_day(2014, 12, 31, 1)\n self.assertEqual(year, 2015)\n self.assertEqual(month, 1)\n self.assertEqual(day, 1)\n self.assertEqual(error, False)", "def form_InputDateNoneValue(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.Date(default=datetime.date(1900,1,1)))\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(empty=datetime.date(1900,1,1),roundtrip_empty=True)\n return form", "def test_none_handling(self):\r\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def test_date_with_zero_hours(self):\n from sosbeacon.utils import format_datetime\n\n date = datetime(year=2012, month=8, day=30, hour=0, minute=13)\n encoded = format_datetime(date)\n self.assertEqual('08/30/12 00:13', encoded)", "def test_invalid_date_format(self):\n date_field = 'expiry_date'\n self.batch_data['expiry_date'] = date_field\n resp = self.query_with_token(\n self.access_token, batch_info_query.format(**self.batch_data))\n self.assertIn(\n 'invalid literal',\n resp['errors'][0]['message'])", "def test_none_handling(self):\n dt = DatetimeTest.create(test_id=0, created_at=None)", "def form_DateDifferentEmpty(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n form['myDateField'].widget = formish.Input(empty=datetime.date.today())\n return form", "def valid_date(date):\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def test_datetime_with_naive_duedate_only_fails(self):\n # setup\n specify_wd = self.project.activity('Specify wheel diameter')\n\n # save old values\n old_start, old_due = 
datetime.strptime(specify_wd._json_data.get('start_date'), ISOFORMAT), \\\n datetime.strptime(specify_wd._json_data.get('due_date'), ISOFORMAT)\n naive_duedate = datetime(2017, 6, 5, 5, 0, 0)\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=naive_duedate)\n\n # teardown\n with warnings.catch_warnings(record=False) as w:\n warnings.simplefilter(\"ignore\")\n specify_wd.edit(due_date=old_due)", "def search_display_date(self):\n return ''", "def __get_date(measurement):\n return ast.literal_eval(measurement).get('date') if measurement else 'unknown date'", "def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"", "def test_datetime_on_missing_values(self, mock_url_read):\n mock_url_read.return_value = '{\"value\": []}'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def format_date(date, format='%m/%d/%Y'):\n if date is not None:\n return \"%02d/%02d/%04d\" % (date.month, date.day, date.year)\n else:\n return ''", "def to_date_or_none(value: Optional[Union[datetime.date, str]]) -> Optional[datetime.date]:\n if isinstance(value, datetime.date):\n return value\n if value is None or value == '000000':\n return None\n return datetime.datetime.strptime(value, '%d%m%y').date()", "def get_date(date):\n return date", "def get_date_or_none(obj, key):\n try:\n return datetime.strptime(obj[key], '%Y-%m-%d')\n except (KeyError, ValueError):\n return None", "def test_range__no_base_date(self):\n data = self._data()\n data.pop('base_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)", "def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)", "def test_date_to_string_returns_valid_string(self):\n date_string = \"2018-01-21\"\n date_format = \"%Y-%m-%d\"\n date_object = datetime.datetime.strptime(\n date_string,\n date_format\n )\n result = self.menu.date_to_string(date_object)\n\n self.assertEqual(result, date_string)", "def date_to_string(my_date):\n if my_date:\n return my_date.strftime(DATE_FORMAT)\n return None", "def test_format_date(self):\n assert BaseTSVWriter.format_date(FIXED_DATE) == '2016/05/15'", "def format_date(date):\n try:\n start_date = datetime.strftime(date, '%m/%d/%Y')\n except (TypeError, ValueError) as e:\n start_date = date\n pass\n return start_date", "def norm_date(dt):\n if not dt:\n return None\n dt = at.date_to_datetime(dt)\n if 1900 < dt.year < 3000:\n return dt.strftime(at.DATE_YMD_FORMAT)", "def test_no_due_date(self):\r\n node = object()\r\n self.assertEqual(self.call_fut(node), None)", "def search_date(self, text='date'):\n\n date = input(f\"\\nEnter a {text} (MM-DD-YYYY): \")\n date_obj = datetime.strptime(date, \"%m-%d-%Y\")\n\n try:\n date = datetime.strftime(date_obj, \"%m-%d-%Y\")\n return date\n except ValueError:\n input(\"\\nFormat of date must be MM-DD-YYYY\\n\")\n return self.search_date()", "def date_specificity(date_string):\n length = len(date_string)\n if length == 10:\n return 'ymd'\n elif length == 7:\n return 'ym'\n elif length == 4:\n return 'y'\n return None", "def set_non_recurring_charge_begin_date(self, begin_date):\n if begin_date is None:\n statement_summary_begin_date_element = 
self.wait().until(EC.presence_of_element_located(self.statement_details_begin_date_locator), 'statement details begin date locator not found before specified time out')\n non_recurring_charge_begin_date = str(statement_summary_begin_date_element.text)\n elif begin_date == \"\":\n current_date = datetime.date.today()\n non_recurring_charge_begin_date = \"%d/%d/%d\" % (current_date.month, current_date.day, current_date.year)\n else:\n non_recurring_charge_begin_date = begin_date\n self.set_value_into_input_field(self.non_recurring_charge_begin_date_locator, non_recurring_charge_begin_date)\n return non_recurring_charge_begin_date", "def test_convert_date_error(self):\n try:\n convert_to_date('N/A', FORMAT_CALENDAR)\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def _validate(year, month, day):\n if day is not None and month is None:\n raise ValueError(\"Day without month\")\n if day is None:\n day = 1\n if month is None:\n month = 1\n if year is None:\n year = 2000\n # actual validation happens here\n datetime.date(year, month, day)", "def validate_date(date_str):\n\ttry:\n\t\treturn (datetime.strptime(date_str, \"%Y-%m-%d\"), \"Date format matched\")\n\texcept Exception as e:\n\t\tlog.error(sys.exc_info()[0], e)\n\t\treturn (None, \"Date should be of YYYY-MM-DD format\")", "def test_validate_invalid_firms_conversion_with_invalid_date():\n firms_conversion_json = copy.deepcopy(FIRMS_CONVERSION)\n firms_conversion_json['startDate'] = \"test\"\n legal_filing = {'conversion': firms_conversion_json}\n\n is_valid, errors = validate(legal_filing, 'conversion')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_validate_date_entry_returns_correct_iso_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n date_object = datetime.datetime.strptime(\n date_string,\n date_format['datetime format'])\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (None, date_object)\n\n self.assertEqual(result, expected_result)", "def test_param_year_is_none(self):\n test_date = get_by_values(Ordinal.first, Weekday.Saturday, Month.May)\n self.assertEquals(date.today().year, test_date.year)", "def clean_expiration_date(self):\n expiration_date = self.cleaned_data['expiration_date']\n if expiration_date.date() <= datetime.date.today():\n v_err('elapsed')\n return expiration_date", "def format_date(data, format_string='%Y-%m-%d'):\n if (data == '') or 'BC' in data:\n return None\n return datetime.strptime(data, format_string)", "def _earliest_allowed_date(cls):\r\n DAYS_GOOD_FOR = settings.VERIFY_STUDENT[\"DAYS_GOOD_FOR\"]\r\n allowed_date = (\r\n datetime.now(pytz.UTC) - timedelta(days=DAYS_GOOD_FOR)\r\n )\r\n return allowed_date", "def convert_date(datestring):\n datestring = datestring.rstrip('†')\n if datestring not in ('NA', 'None specified', 'TBA', 'None', 'N/A', ''):\n try:\n return dateutil_parser.parse(datestring).date()\n except ValueError: # dateutil's error messages aren't friendly\n raise ValueError(\"Not a date: {0}\".format(datestring))", "def _date_from_str(self, date_entry, date_str):\n dt_obj = None\n if date_str:\n dt_obj = parser.parse(date_str)\n if dt_obj < MIN_DATE or dt_obj > MAX_DATE:\n prompt = 'Please keep dates within Jan 1, 2015 up to today.'\n raise ValueError(prompt)\n \n return dt_obj", "def test_invalid_date(self):\n self.register_user()\n result = self.login_user()\n access_token = 
json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1233, 'date_of_expense': 'fgjfj'})\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], 'The date fgjfj does not match the format DD-MM-YYYY')", "def dump_date(data):\n if data is None:\n return None\n return data.strftime(\"%Y-%m-%d\")", "def dump_date(data):\n if data is None:\n return None\n return data.strftime(\"%Y-%m-%d\")", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def _validate_date(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'format' in mapping:\n token_lines = None # type: Optional[List[List[lexery.Token]]]\n try:\n token_lines = mapry.strftime.tokenize(format=mapping['format'])\n except (lexery.Error, NotImplementedError) as err:\n return SchemaError(str(err), ref='{}/format'.format(ref))\n\n valerr = mapry.strftime.validate_date_tokens(token_lines=token_lines)\n if valerr is not None:\n return SchemaError(str(valerr), ref='{}/format'.format(ref))\n\n return None", "def test_created_date_nonexistent(self):\n qs = {'a': 1, 'w': 2, 'format': 'json', 'created': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)", "def date():\r\n while True:\r\n clear()\r\n task_date = input(\"When was this task performed? Date format: dd-mm-yyyy \\n > \").strip()\r\n try:\r\n task_date = datetime.datetime.strptime(task_date, \"%d-%m-%Y\")\r\n if task_date.date() > datetime.datetime.today().date():\r\n\r\n input(\" Sorry, date can't be later than today's date. Press enter and provide a correct date \")\r\n continue\r\n\r\n except ValueError:\r\n input(\" Sorry, not a valid date. Press enter and provide a correct date... 
\")\r\n continue\r\n\r\n except Exception: \r\n raise(\"Something went wrong.\")\r\n input(\"Press enter to continue...\")\r\n continue \r\n\r\n else:\r\n return task_date.strftime(\"%d-%m-%Y\")", "def missing_info(**kwargs) -> str:\n func = kwargs.pop('func', 'unknown')\n if 'ticker' in kwargs: kwargs['ticker'] = kwargs['ticker'].replace('/', '_')\n for dt in ['dt', 'start_dt', 'end_dt', 'start_date', 'end_date']:\n if dt not in kwargs: continue\n kwargs[dt] = utils.fmt_dt(kwargs[dt])\n info = utils.to_str(kwargs, fmt='{value}', sep='/')[1:-1]\n return f'{func}/{info}'", "def check_dateformat(date_field, date_format='YYYY-MM-DD'):\r\n if not date_format or not date_field:\r\n return None\r\n # format = \"%Y-%m-d\"\r\n date_field = date_field.strip()\r\n\r\n try:\r\n dd = None\r\n mm = None\r\n yyyy = None\r\n seperator = '-'\r\n date_part = date_field\r\n time_part = None\r\n if '/' in date_field:\r\n seperator = '/'\r\n if ' ' in date_field:\r\n (date_part, time_part) = date_field.split(' ')\r\n\r\n if not time_part:\r\n if date_format == 'DD-MM-YYYY' or date_format == 'DD/MM/YYYY':\r\n (dd, mm, yyyy) = date_part.split(seperator)\r\n elif date_format == 'YYYY-MM-DD' or date_format == 'YYYY/MM/DD':\r\n (yyyy, mm, dd) = date_part.split(seperator)\r\n elif date_format == 'YYYY-DD-MM' or date_format == 'YYYY/DD/MM':\r\n (yyyy, dd, mm) = date_part.split(seperator)\r\n yyyy = int(yyyy)\r\n dd = int(dd)\r\n mm = int(mm)\r\n date_part = date(yyyy, mm, dd)\r\n return date_part\r\n else:\r\n raise SIDException(\r\n 'Invalid Date: datetime not supported', 'datetime')\r\n # to support further \"%d/%m/%Y %H:%M:%S\"\r\n\r\n # date_string = str(yyyy) + '-' + str(mm) + '-' + str(dd)\r\n # return datetime.strptime(date_string, format)\r\n\r\n except Exception:\r\n raise SIDException('Invalid Date', 'check_dateformat')", "def test_datetime_missing(self, mock_url_read):\n mock_url_read.return_value = '{\"value\": [{\"LastScan\": {}}]}'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('id'))\n mock_url_read.assert_called_once_with(\n 'http://url/Cxwebinterface/odata/v1/Projects?$expand=LastScan&$filter=Name%20eq%20%27id%27')", "def test_relative_date(self):\n self.assertEqual(self.show.relative_date, None)" ]
[ "0.66881824", "0.6186882", "0.61217904", "0.61152357", "0.60820913", "0.6069122", "0.6046784", "0.60390896", "0.60354966", "0.602315", "0.59958637", "0.59915537", "0.5989876", "0.59784245", "0.5972851", "0.5956638", "0.5951182", "0.5942427", "0.59409153", "0.59297127", "0.5917569", "0.5901993", "0.58746034", "0.58741814", "0.5867042", "0.5855193", "0.58486074", "0.5835939", "0.5834874", "0.58304167", "0.5809336", "0.58074427", "0.5782217", "0.57805157", "0.57761174", "0.5759498", "0.5740554", "0.5739473", "0.5714288", "0.5702908", "0.5676373", "0.5665953", "0.5654771", "0.56496793", "0.5643615", "0.563389", "0.56332535", "0.5617478", "0.5605993", "0.56054264", "0.5604918", "0.5598814", "0.5589842", "0.5587963", "0.55814004", "0.5571713", "0.5571427", "0.55698943", "0.5569778", "0.5561567", "0.5546411", "0.55435675", "0.553083", "0.5530087", "0.5523437", "0.55221367", "0.55168754", "0.5516582", "0.5513567", "0.5492065", "0.5490428", "0.5489398", "0.54752153", "0.5460238", "0.54469436", "0.5434658", "0.542795", "0.54240423", "0.54163796", "0.54044354", "0.54009956", "0.5399635", "0.53988653", "0.5386205", "0.53854144", "0.53844285", "0.5378602", "0.53769064", "0.53747076", "0.53729326", "0.53630984", "0.53630984", "0.53500795", "0.5349537", "0.53462046", "0.53414613", "0.5337823", "0.5337751", "0.53347594", "0.5328997" ]
0.69573593
0
Ensure a missing lhs returns rhs.
def test_no_lhs(self):
        from sosbeacon.utils import get_latest_datetime

        lhs = None
        rhs = object()

        result = get_latest_datetime(lhs, rhs)

        self.assertIs(rhs, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __none_left_mult(x, y):\n if x is not None:\n return x * y\n return None", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def nonull(val):\n return val if not pd.isnull(val) else None", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def __ne__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0", "def test_notImplementedLessThanEquals(self):\n self.assertEqual(Comparable(1).__le__(object()), NotImplemented)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def xor_constrain(a, b):\n if a and not b:\n return a\n if b and not a:\n return b\n if a and b:\n raise ValueError('xor error: both values cannot be True')\n raise ValueError('xor error: both values cannot be False')", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def requires_lvalue(self, *args):\n return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def _builtin_neq(arg1, arg2, **kwdargs):\n try:\n unify_value(arg1, arg2, {})\n return False\n except UnifyError:\n return True", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"", "def IteratedCompareMixed (lhs, rhs):\n li = iter(lhs)\n ri = iter(rhs)\n while True:\n try:\n (lv, rv) = (next(li), next(ri))\n if lv is None:\n if rv is None:\n continue\n return -1\n if rv is None:\n return 1\n if lv == rv:\n continue\n if lv < rv:\n return -1\n return 1\n except StopIteration:\n nl = len(lhs)\n nr = len(rhs)\n if nl < nr:\n 
return -1\n if nl == nr:\n return 0\n return 1", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])", "def _as_rhs(self):\n raise NotImplementedError", "def __ne__(left, right):\n return (not (left == right))", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def __le__(self, other):\r\n return NotImplemented", "def __neq__(self, other): \n return not self == other", "def MissingOperand():\n\n if dec.Asm.Parse_Pointer == 0:\n errors.DoError('missoper', False)\n return True\n else:\n return False", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def missing_link(x):\n return x**2", "def _op_ne(self, left: Any, right: Any) -> BoolOrIter:\n out = self._op_eq(left, right)\n if isinstance(out, (numpy.ndarray, Series)):\n neout = ~out\n # neout[pandas.isna(out)] = numpy.nan\n return neout\n # out is always a numpy.ndarray\n return not out # pragma: no cover", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _builtin_val_neq(a, b, engine=None, **k):\n check_mode((a, b), ['gg'], functor='=\\=', **k)\n a_value = a.compute_value(engine.functions)\n b_value = b.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value != b_value", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != (other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to 
binop Spectra with nonconformant Livetimes\"\n )", "def test_grade_nan(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect an incorrect answer (+ nan) to be marked incorrect\r\n # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)\r\n input_formula = \"10*x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect an correct answer (+ nan) to be marked incorrect\r\n input_formula = \"x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def visit_none_type(self, left: NoneType) -> T:", "def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def less_equal(lhs, rhs):\n return _make.less_equal(lhs, rhs)", "def from_rhs(self, rhs):\n return self._rhs_rules[rhs]", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n else:\n return not result", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def find_missing(src, dest):\n\tmissing = [item for item in src if item not in dest]\n\treturn missing", "def _abs (x):\n\n return x if le(nil,x) else -x", "def resolve_against(self, other, **kwargs):\n if isinstance(other, (Not, Or, Eq)):\n return _coconut_tail_call(other.resolve_against, self, **kwargs)\n elif (self.find_unification)(Not(other).simplify(**kwargs)) is not None:\n return bot\n else:\n return None", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_rightHandArgumentImplementsEquality(self):\n self.assertTrue(Record(1, 2) == EqualToEverything())\n self.assertFalse(Record(1, 2) == EqualToNothing())", "def swap(\n self, src, dest, raise_src=False, raise_dest=False,\n raise_pred=False\n ) -> typing.Optional[Expr]:\n\n if not isinstance(src, int):\n pred = src\n src = None # None for not found, False for duplicated.\n for i, v in enumerate(self.indices):\n if pred(v):\n src = i if src is None else False\n else:\n continue\n if src is None or src is False:\n return _fail(raise_pred)\n\n if src == dest:\n self.decide(dest)\n return _UNITY\n\n for i, j in [(src, raise_src), (dest, raise_dest)]:\n if self.is_decided(i):\n return _fail(j)\n\n indices = self.indices\n src_jm, dest_jm = indices[src], indices[dest]\n # Core swapping.\n indices[src], indices[dest] = dest_jm, src_jm\n # Update bare m dummy mapping.\n src_m, dest_m = src_jm.m_symb, dest_jm.m_symb\n if src_m != dest_m:\n # When src_m and dest_m are both None, no need for any treatment.\n # Or when they are the same symbol, the set of appearances of that\n # symbol does not need to be changed as well.\n for m_symb, old_idx, new_idx 
in [\n (src_m, src, dest),\n (dest_m, dest, src)\n ]:\n if m_symb is None or m_symb not in self._m_dumms:\n continue\n entry = self._m_dumms[m_symb]\n if self._uniq_m:\n assert entry == old_idx\n self._m_dumms[m_symb] = new_idx\n else:\n entry.remove(old_idx) # Key error when not present.\n assert new_idx not in entry\n entry.add(new_idx)\n\n self.decide(dest)\n return _NEG_UNITY ** self._total_j", "def __lt__(self, other):\n if isinstance(other, RationalFrac):\n # NOTE: neg is False when 0 in numer.\n return (self - other).neg\n elif isinstance(other, (int, float, str)):\n return (self - (RationalFrac(other))).neg\n else:\n return NotImplemented", "def testMergeRejectsUnequalNodes():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=3, z=3)\n\n with pytest.raises(TypeError):\n n1.merge_with(n2)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)", "def test_check_y_not_in_domain_link(default_X_y, default_gam):\n X, y = default_X_y\n gam = default_gam\n\n with pytest.raises(ValueError):\n check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)", "def test_adjacent_unpresent(graph_with_edges):\n with pytest.raises(ValueError):\n graph_with_edges.adjacent('Captain Picard', 'Star Wars')", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def iexact(self, other):", "def __ge__(self, other):\r\n return NotImplemented", "def find_missing(expected, existing):\n return set(expected).difference(existing)", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def qual_missing(id_, seq, qual):\r\n return qual is None", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def __ne__(self, other):\n eq = self.__eq__(other)\n if eq is not NotImplemented:\n return not eq\n else:\n return NotImplemented", "def ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def merge(self, rhs):\n if self.forwardLabel == 
rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def __ne__(self, other):\n return not_equal(self, other)", "def try_fold_comparison_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n left = left.expr\n right = right.expr\n\n if op == ast.ops.EQ:\n value = left.value == right.value\n elif op == ast.ops.NE:\n value = left.value != right.value\n elif op == ast.ops.GT:\n value = left.value > right.value\n elif op == ast.ops.GE:\n value = left.value >= right.value\n elif op == ast.ops.LT:\n value = left.value < right.value\n elif op == ast.ops.LE:\n value = left.value <= right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=ctx.schema.get('std::bool')),\n ctx=ctx)", "def implies(A, B):\n\treturn B or not A", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def __ne__(self, other):\n return self.isNot(other)", "def test_merge_with_weird_eq():\r\n\r\n #SCALAR CASE\r\n x = T.constant(numpy.asarray(1), name='x')\r\n y = T.constant(numpy.asarray(1), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]\r\n\r\n #NONSCALAR CASE\r\n # This was created to test TensorConstantSignature\r\n x = T.constant(numpy.ones(5), name='x')\r\n y = T.constant(numpy.ones(5), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]", "def is_neq(lhs, rhs, assumptions=None):\n return fuzzy_not(is_eq(lhs, rhs, assumptions))", "def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result", "def test_disallow_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x.a@-> 5; 5 -0:a-> (2)\n \"1 151 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x.b@-> 6; 6 -0:b-> (2)\n \"1 152 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"b\"}\n assert set(fst.generate(\"b\")) == {\"a\"}\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_require_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@R.x.a@-> 5; 5 -0:a-> (2)\n \"1 161 5 0\",\n \"5 0 97 2 0\",\n # 1 -@R.x.b@-> 6; 6 -0:b-> (2)\n \"1 162 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"a\"}\n assert set(fst.generate(\"b\")) == {\"b\"}\n assert set(fst.generate(\"c\")) == set()", "def 
test_notImplementedLessThan(self):\n self.assertEqual(Comparable(1).__lt__(object()), NotImplemented)", "def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df" ]
[ "0.6356365", "0.56324995", "0.5260674", "0.5232673", "0.51086223", "0.5028781", "0.5027332", "0.49481612", "0.48879817", "0.4887651", "0.48581392", "0.4851787", "0.48394328", "0.48073387", "0.48042664", "0.47770894", "0.47716808", "0.4759484", "0.47486883", "0.47274226", "0.47232836", "0.47153628", "0.4714673", "0.47144112", "0.4707077", "0.4698117", "0.46872255", "0.46520314", "0.4643336", "0.46312854", "0.46165857", "0.46093032", "0.45991904", "0.45927265", "0.45895317", "0.45553607", "0.45550314", "0.45404685", "0.45404685", "0.45312065", "0.45241743", "0.45226714", "0.45226714", "0.45148984", "0.4500773", "0.44851986", "0.44800222", "0.44800064", "0.44798666", "0.44612488", "0.44598353", "0.44582385", "0.44498858", "0.44437742", "0.44426334", "0.44364014", "0.44364014", "0.44333836", "0.44309828", "0.44302157", "0.4428263", "0.44263417", "0.4423512", "0.4422614", "0.4419074", "0.44178027", "0.44175133", "0.44039476", "0.43941382", "0.43907502", "0.43855304", "0.4378287", "0.43776235", "0.43752736", "0.43741813", "0.43685377", "0.43682975", "0.4363379", "0.4361929", "0.43546623", "0.4353992", "0.43534848", "0.4353147", "0.4351677", "0.43496957", "0.43492135", "0.434722", "0.43469942", "0.43469942", "0.43469942", "0.43439764", "0.43405426", "0.43369326", "0.43357158", "0.43336225", "0.43304914", "0.43284062", "0.43277046", "0.43252683", "0.4322795" ]
0.6314329
1
Ensure a missing lhs returns rhs.
def test_no_rhs(self):
        from sosbeacon.utils import get_latest_datetime

        lhs = object()
        rhs = None

        result = get_latest_datetime(lhs, rhs)

        self.assertIs(lhs, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = None\n rhs = object()\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __none_left_mult(x, y):\n if x is not None:\n return x * y\n return None", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def nonull(val):\n return val if not pd.isnull(val) else None", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def __ne__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0", "def test_notImplementedLessThanEquals(self):\n self.assertEqual(Comparable(1).__le__(object()), NotImplemented)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def xor_constrain(a, b):\n if a and not b:\n return a\n if b and not a:\n return b\n if a and b:\n raise ValueError('xor error: both values cannot be True')\n raise ValueError('xor error: both values cannot be False')", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def requires_lvalue(self, *args):\n return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def _builtin_neq(arg1, arg2, **kwdargs):\n try:\n unify_value(arg1, arg2, {})\n return False\n except UnifyError:\n return True", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"", "def IteratedCompareMixed (lhs, rhs):\n li = iter(lhs)\n ri = iter(rhs)\n while True:\n try:\n (lv, rv) = (next(li), next(ri))\n if lv is None:\n if rv is None:\n continue\n return -1\n if rv is None:\n return 1\n if lv == rv:\n continue\n if lv < rv:\n return -1\n return 1\n except StopIteration:\n nl = len(lhs)\n nr = len(rhs)\n if nl < nr:\n 
return -1\n if nl == nr:\n return 0\n return 1", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])", "def _as_rhs(self):\n raise NotImplementedError", "def __ne__(left, right):\n return (not (left == right))", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def __le__(self, other):\r\n return NotImplemented", "def __neq__(self, other): \n return not self == other", "def MissingOperand():\n\n if dec.Asm.Parse_Pointer == 0:\n errors.DoError('missoper', False)\n return True\n else:\n return False", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def missing_link(x):\n return x**2", "def _op_ne(self, left: Any, right: Any) -> BoolOrIter:\n out = self._op_eq(left, right)\n if isinstance(out, (numpy.ndarray, Series)):\n neout = ~out\n # neout[pandas.isna(out)] = numpy.nan\n return neout\n # out is always a numpy.ndarray\n return not out # pragma: no cover", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _builtin_val_neq(a, b, engine=None, **k):\n check_mode((a, b), ['gg'], functor='=\\=', **k)\n a_value = a.compute_value(engine.functions)\n b_value = b.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value != b_value", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != (other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to 
binop Spectra with nonconformant Livetimes\"\n )", "def test_grade_nan(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect an incorrect answer (+ nan) to be marked incorrect\r\n # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)\r\n input_formula = \"10*x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect an correct answer (+ nan) to be marked incorrect\r\n input_formula = \"x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def visit_none_type(self, left: NoneType) -> T:", "def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def less_equal(lhs, rhs):\n return _make.less_equal(lhs, rhs)", "def from_rhs(self, rhs):\n return self._rhs_rules[rhs]", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n else:\n return not result", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def find_missing(src, dest):\n\tmissing = [item for item in src if item not in dest]\n\treturn missing", "def _abs (x):\n\n return x if le(nil,x) else -x", "def resolve_against(self, other, **kwargs):\n if isinstance(other, (Not, Or, Eq)):\n return _coconut_tail_call(other.resolve_against, self, **kwargs)\n elif (self.find_unification)(Not(other).simplify(**kwargs)) is not None:\n return bot\n else:\n return None", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_rightHandArgumentImplementsEquality(self):\n self.assertTrue(Record(1, 2) == EqualToEverything())\n self.assertFalse(Record(1, 2) == EqualToNothing())", "def swap(\n self, src, dest, raise_src=False, raise_dest=False,\n raise_pred=False\n ) -> typing.Optional[Expr]:\n\n if not isinstance(src, int):\n pred = src\n src = None # None for not found, False for duplicated.\n for i, v in enumerate(self.indices):\n if pred(v):\n src = i if src is None else False\n else:\n continue\n if src is None or src is False:\n return _fail(raise_pred)\n\n if src == dest:\n self.decide(dest)\n return _UNITY\n\n for i, j in [(src, raise_src), (dest, raise_dest)]:\n if self.is_decided(i):\n return _fail(j)\n\n indices = self.indices\n src_jm, dest_jm = indices[src], indices[dest]\n # Core swapping.\n indices[src], indices[dest] = dest_jm, src_jm\n # Update bare m dummy mapping.\n src_m, dest_m = src_jm.m_symb, dest_jm.m_symb\n if src_m != dest_m:\n # When src_m and dest_m are both None, no need for any treatment.\n # Or when they are the same symbol, the set of appearances of that\n # symbol does not need to be changed as well.\n for m_symb, old_idx, new_idx 
in [\n (src_m, src, dest),\n (dest_m, dest, src)\n ]:\n if m_symb is None or m_symb not in self._m_dumms:\n continue\n entry = self._m_dumms[m_symb]\n if self._uniq_m:\n assert entry == old_idx\n self._m_dumms[m_symb] = new_idx\n else:\n entry.remove(old_idx) # Key error when not present.\n assert new_idx not in entry\n entry.add(new_idx)\n\n self.decide(dest)\n return _NEG_UNITY ** self._total_j", "def __lt__(self, other):\n if isinstance(other, RationalFrac):\n # NOTE: neg is False when 0 in numer.\n return (self - other).neg\n elif isinstance(other, (int, float, str)):\n return (self - (RationalFrac(other))).neg\n else:\n return NotImplemented", "def testMergeRejectsUnequalNodes():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=3, z=3)\n\n with pytest.raises(TypeError):\n n1.merge_with(n2)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)", "def test_check_y_not_in_domain_link(default_X_y, default_gam):\n X, y = default_X_y\n gam = default_gam\n\n with pytest.raises(ValueError):\n check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)", "def test_adjacent_unpresent(graph_with_edges):\n with pytest.raises(ValueError):\n graph_with_edges.adjacent('Captain Picard', 'Star Wars')", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def iexact(self, other):", "def __ge__(self, other):\r\n return NotImplemented", "def find_missing(expected, existing):\n return set(expected).difference(existing)", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def qual_missing(id_, seq, qual):\r\n return qual is None", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def __ne__(self, other):\n eq = self.__eq__(other)\n if eq is not NotImplemented:\n return not eq\n else:\n return NotImplemented", "def ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def merge(self, rhs):\n if self.forwardLabel == 
rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def __ne__(self, other):\n return not_equal(self, other)", "def try_fold_comparison_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n left = left.expr\n right = right.expr\n\n if op == ast.ops.EQ:\n value = left.value == right.value\n elif op == ast.ops.NE:\n value = left.value != right.value\n elif op == ast.ops.GT:\n value = left.value > right.value\n elif op == ast.ops.GE:\n value = left.value >= right.value\n elif op == ast.ops.LT:\n value = left.value < right.value\n elif op == ast.ops.LE:\n value = left.value <= right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=ctx.schema.get('std::bool')),\n ctx=ctx)", "def implies(A, B):\n\treturn B or not A", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def __ne__(self, other):\n return self.isNot(other)", "def test_merge_with_weird_eq():\r\n\r\n #SCALAR CASE\r\n x = T.constant(numpy.asarray(1), name='x')\r\n y = T.constant(numpy.asarray(1), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]\r\n\r\n #NONSCALAR CASE\r\n # This was created to test TensorConstantSignature\r\n x = T.constant(numpy.ones(5), name='x')\r\n y = T.constant(numpy.ones(5), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]", "def is_neq(lhs, rhs, assumptions=None):\n return fuzzy_not(is_eq(lhs, rhs, assumptions))", "def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result", "def test_disallow_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x.a@-> 5; 5 -0:a-> (2)\n \"1 151 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x.b@-> 6; 6 -0:b-> (2)\n \"1 152 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"b\"}\n assert set(fst.generate(\"b\")) == {\"a\"}\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_require_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@R.x.a@-> 5; 5 -0:a-> (2)\n \"1 161 5 0\",\n \"5 0 97 2 0\",\n # 1 -@R.x.b@-> 6; 6 -0:b-> (2)\n \"1 162 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"a\"}\n assert set(fst.generate(\"b\")) == {\"b\"}\n assert set(fst.generate(\"c\")) == set()", "def 
test_notImplementedLessThan(self):\n self.assertEqual(Comparable(1).__lt__(object()), NotImplemented)", "def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df" ]
[ "0.6314329", "0.56324995", "0.5260674", "0.5232673", "0.51086223", "0.5028781", "0.5027332", "0.49481612", "0.48879817", "0.4887651", "0.48581392", "0.4851787", "0.48394328", "0.48073387", "0.48042664", "0.47770894", "0.47716808", "0.4759484", "0.47486883", "0.47274226", "0.47232836", "0.47153628", "0.4714673", "0.47144112", "0.4707077", "0.4698117", "0.46872255", "0.46520314", "0.4643336", "0.46312854", "0.46165857", "0.46093032", "0.45991904", "0.45927265", "0.45895317", "0.45553607", "0.45550314", "0.45404685", "0.45404685", "0.45312065", "0.45241743", "0.45226714", "0.45226714", "0.45148984", "0.4500773", "0.44851986", "0.44800222", "0.44800064", "0.44798666", "0.44612488", "0.44598353", "0.44582385", "0.44498858", "0.44437742", "0.44426334", "0.44364014", "0.44364014", "0.44333836", "0.44309828", "0.44302157", "0.4428263", "0.44263417", "0.4423512", "0.4422614", "0.4419074", "0.44178027", "0.44175133", "0.44039476", "0.43941382", "0.43907502", "0.43855304", "0.4378287", "0.43776235", "0.43752736", "0.43741813", "0.43685377", "0.43682975", "0.4363379", "0.4361929", "0.43546623", "0.4353992", "0.43534848", "0.4353147", "0.4351677", "0.43496957", "0.43492135", "0.434722", "0.43469942", "0.43469942", "0.43469942", "0.43439764", "0.43405426", "0.43369326", "0.43357158", "0.43336225", "0.43304914", "0.43284062", "0.43277046", "0.43252683", "0.4322795" ]
0.6356365
0
Ensure a missing lhs returns rhs.
def test_larger_lhs(self):
        from sosbeacon.utils import get_latest_datetime

        lhs = datetime(2012, 9, 20, 3, 45)
        rhs = datetime(2012, 9, 20, 2, 45)

        result = get_latest_datetime(lhs, rhs)

        self.assertIs(lhs, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_no_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = None\n rhs = object()\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __none_left_mult(x, y):\n if x is not None:\n return x * y\n return None", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def nonull(val):\n return val if not pd.isnull(val) else None", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def __ne__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0", "def test_notImplementedLessThanEquals(self):\n self.assertEqual(Comparable(1).__le__(object()), NotImplemented)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def xor_constrain(a, b):\n if a and not b:\n return a\n if b and not a:\n return b\n if a and b:\n raise ValueError('xor error: both values cannot be True')\n raise ValueError('xor error: both values cannot be False')", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def requires_lvalue(self, *args):\n return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def _builtin_neq(arg1, arg2, **kwdargs):\n try:\n unify_value(arg1, arg2, {})\n return False\n except UnifyError:\n return True", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"", "def IteratedCompareMixed (lhs, rhs):\n li = iter(lhs)\n ri = iter(rhs)\n while True:\n try:\n (lv, rv) = (next(li), next(ri))\n if lv is None:\n if rv is None:\n 
continue\n return -1\n if rv is None:\n return 1\n if lv == rv:\n continue\n if lv < rv:\n return -1\n return 1\n except StopIteration:\n nl = len(lhs)\n nr = len(rhs)\n if nl < nr:\n return -1\n if nl == nr:\n return 0\n return 1", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])", "def _as_rhs(self):\n raise NotImplementedError", "def __ne__(left, right):\n return (not (left == right))", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def __le__(self, other):\r\n return NotImplemented", "def __neq__(self, other): \n return not self == other", "def MissingOperand():\n\n if dec.Asm.Parse_Pointer == 0:\n errors.DoError('missoper', False)\n return True\n else:\n return False", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def missing_link(x):\n return x**2", "def _op_ne(self, left: Any, right: Any) -> BoolOrIter:\n out = self._op_eq(left, right)\n if isinstance(out, (numpy.ndarray, Series)):\n neout = ~out\n # neout[pandas.isna(out)] = numpy.nan\n return neout\n # out is always a numpy.ndarray\n return not out # pragma: no cover", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _builtin_val_neq(a, b, engine=None, **k):\n check_mode((a, b), ['gg'], functor='=\\=', **k)\n a_value = a.compute_value(engine.functions)\n b_value = b.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value != b_value", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != 
(other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant Livetimes\"\n )", "def test_grade_nan(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect an incorrect answer (+ nan) to be marked incorrect\r\n # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)\r\n input_formula = \"10*x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect an correct answer (+ nan) to be marked incorrect\r\n input_formula = \"x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def visit_none_type(self, left: NoneType) -> T:", "def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def less_equal(lhs, rhs):\n return _make.less_equal(lhs, rhs)", "def from_rhs(self, rhs):\n return self._rhs_rules[rhs]", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n else:\n return not result", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def find_missing(src, dest):\n\tmissing = [item for item in src if item not in dest]\n\treturn missing", "def _abs (x):\n\n return x if le(nil,x) else -x", "def resolve_against(self, other, **kwargs):\n if isinstance(other, (Not, Or, Eq)):\n return _coconut_tail_call(other.resolve_against, self, **kwargs)\n elif (self.find_unification)(Not(other).simplify(**kwargs)) is not None:\n return bot\n else:\n return None", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_rightHandArgumentImplementsEquality(self):\n self.assertTrue(Record(1, 2) == EqualToEverything())\n self.assertFalse(Record(1, 2) == EqualToNothing())", "def swap(\n self, src, dest, raise_src=False, raise_dest=False,\n raise_pred=False\n ) -> typing.Optional[Expr]:\n\n if not isinstance(src, int):\n pred = src\n src = None # None for not found, False for duplicated.\n for i, v in enumerate(self.indices):\n if pred(v):\n src = i if src is None else False\n else:\n continue\n if src is None or src is False:\n return _fail(raise_pred)\n\n if src == dest:\n self.decide(dest)\n return _UNITY\n\n for i, j in [(src, raise_src), (dest, raise_dest)]:\n if self.is_decided(i):\n return _fail(j)\n\n indices = self.indices\n src_jm, dest_jm = indices[src], indices[dest]\n # Core swapping.\n indices[src], indices[dest] = dest_jm, src_jm\n # Update bare m dummy mapping.\n src_m, dest_m = src_jm.m_symb, dest_jm.m_symb\n if src_m != dest_m:\n # When src_m and dest_m are 
both None, no need for any treatment.\n # Or when they are the same symbol, the set of appearances of that\n # symbol does not need to be changed as well.\n for m_symb, old_idx, new_idx in [\n (src_m, src, dest),\n (dest_m, dest, src)\n ]:\n if m_symb is None or m_symb not in self._m_dumms:\n continue\n entry = self._m_dumms[m_symb]\n if self._uniq_m:\n assert entry == old_idx\n self._m_dumms[m_symb] = new_idx\n else:\n entry.remove(old_idx) # Key error when not present.\n assert new_idx not in entry\n entry.add(new_idx)\n\n self.decide(dest)\n return _NEG_UNITY ** self._total_j", "def __lt__(self, other):\n if isinstance(other, RationalFrac):\n # NOTE: neg is False when 0 in numer.\n return (self - other).neg\n elif isinstance(other, (int, float, str)):\n return (self - (RationalFrac(other))).neg\n else:\n return NotImplemented", "def testMergeRejectsUnequalNodes():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=3, z=3)\n\n with pytest.raises(TypeError):\n n1.merge_with(n2)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)", "def test_check_y_not_in_domain_link(default_X_y, default_gam):\n X, y = default_X_y\n gam = default_gam\n\n with pytest.raises(ValueError):\n check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)", "def test_adjacent_unpresent(graph_with_edges):\n with pytest.raises(ValueError):\n graph_with_edges.adjacent('Captain Picard', 'Star Wars')", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def iexact(self, other):", "def __ge__(self, other):\r\n return NotImplemented", "def find_missing(expected, existing):\n return set(expected).difference(existing)", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def qual_missing(id_, seq, qual):\r\n return qual is None", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def __ne__(self, other):\n eq = self.__eq__(other)\n if eq is not NotImplemented:\n return not eq\n else:\n return NotImplemented", "def 
ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def merge(self, rhs):\n if self.forwardLabel == rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def __ne__(self, other):\n return not_equal(self, other)", "def try_fold_comparison_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n left = left.expr\n right = right.expr\n\n if op == ast.ops.EQ:\n value = left.value == right.value\n elif op == ast.ops.NE:\n value = left.value != right.value\n elif op == ast.ops.GT:\n value = left.value > right.value\n elif op == ast.ops.GE:\n value = left.value >= right.value\n elif op == ast.ops.LT:\n value = left.value < right.value\n elif op == ast.ops.LE:\n value = left.value <= right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=ctx.schema.get('std::bool')),\n ctx=ctx)", "def implies(A, B):\n\treturn B or not A", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def __ne__(self, other):\n return self.isNot(other)", "def test_merge_with_weird_eq():\r\n\r\n #SCALAR CASE\r\n x = T.constant(numpy.asarray(1), name='x')\r\n y = T.constant(numpy.asarray(1), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]\r\n\r\n #NONSCALAR CASE\r\n # This was created to test TensorConstantSignature\r\n x = T.constant(numpy.ones(5), name='x')\r\n y = T.constant(numpy.ones(5), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]", "def is_neq(lhs, rhs, assumptions=None):\n return fuzzy_not(is_eq(lhs, rhs, assumptions))", "def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result", "def test_disallow_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x.a@-> 5; 5 -0:a-> (2)\n \"1 151 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x.b@-> 6; 6 -0:b-> (2)\n \"1 152 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"b\"}\n assert set(fst.generate(\"b\")) == {\"a\"}\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_require_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@R.x.a@-> 5; 5 -0:a-> (2)\n \"1 161 5 0\",\n \"5 0 97 2 0\",\n # 1 -@R.x.b@-> 6; 6 -0:b-> (2)\n \"1 162 6 0\",\n \"6 
0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"a\"}\n assert set(fst.generate(\"b\")) == {\"b\"}\n assert set(fst.generate(\"c\")) == set()", "def test_notImplementedLessThan(self):\n self.assertEqual(Comparable(1).__lt__(object()), NotImplemented)", "def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df" ]
[ "0.6356365", "0.6314329", "0.56324995", "0.5260674", "0.5232673", "0.51086223", "0.5028781", "0.5027332", "0.49481612", "0.48879817", "0.4887651", "0.48581392", "0.4851787", "0.48394328", "0.48073387", "0.48042664", "0.47770894", "0.47716808", "0.4759484", "0.47486883", "0.47274226", "0.47232836", "0.47153628", "0.4714673", "0.47144112", "0.4707077", "0.4698117", "0.46872255", "0.46520314", "0.4643336", "0.46312854", "0.46165857", "0.46093032", "0.45991904", "0.45927265", "0.45895317", "0.45553607", "0.45550314", "0.45404685", "0.45404685", "0.45312065", "0.45241743", "0.45226714", "0.45226714", "0.45148984", "0.4500773", "0.44851986", "0.44800222", "0.44800064", "0.44798666", "0.44612488", "0.44598353", "0.44582385", "0.44498858", "0.44437742", "0.44426334", "0.44364014", "0.44364014", "0.44333836", "0.44309828", "0.44302157", "0.4428263", "0.44263417", "0.4423512", "0.4422614", "0.4419074", "0.44178027", "0.44175133", "0.44039476", "0.43941382", "0.43907502", "0.43855304", "0.4378287", "0.43776235", "0.43752736", "0.43741813", "0.43685377", "0.43682975", "0.4363379", "0.4361929", "0.43546623", "0.4353992", "0.43534848", "0.4353147", "0.4351677", "0.43496957", "0.43492135", "0.434722", "0.43469942", "0.43469942", "0.43469942", "0.43439764", "0.43405426", "0.43369326", "0.43357158", "0.43336225", "0.43304914", "0.43284062", "0.43277046", "0.43252683", "0.4322795" ]
0.0
-1
Ensure a missing lhs returns rhs.
def test_larger_rhs(self):
        from sosbeacon.utils import get_latest_datetime

        lhs = datetime(2012, 9, 20, 2, 59)
        rhs = datetime(2012, 9, 20, 3, 00)

        result = get_latest_datetime(lhs, rhs)

        self.assertIs(rhs, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_no_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = None\n rhs = object()\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __none_left_mult(x, y):\n if x is not None:\n return x * y\n return None", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def nonull(val):\n return val if not pd.isnull(val) else None", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def __ne__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0", "def test_notImplementedLessThanEquals(self):\n self.assertEqual(Comparable(1).__le__(object()), NotImplemented)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def xor_constrain(a, b):\n if a and not b:\n return a\n if b and not a:\n return b\n if a and b:\n raise ValueError('xor error: both values cannot be True')\n raise ValueError('xor error: both values cannot be False')", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def requires_lvalue(self, *args):\n return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def _builtin_neq(arg1, arg2, **kwdargs):\n try:\n unify_value(arg1, arg2, {})\n return False\n except UnifyError:\n return True", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"", "def IteratedCompareMixed (lhs, rhs):\n li = iter(lhs)\n ri = iter(rhs)\n while True:\n try:\n (lv, rv) = (next(li), next(ri))\n if lv is None:\n if rv is None:\n 
continue\n return -1\n if rv is None:\n return 1\n if lv == rv:\n continue\n if lv < rv:\n return -1\n return 1\n except StopIteration:\n nl = len(lhs)\n nr = len(rhs)\n if nl < nr:\n return -1\n if nl == nr:\n return 0\n return 1", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])", "def _as_rhs(self):\n raise NotImplementedError", "def __ne__(left, right):\n return (not (left == right))", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def __le__(self, other):\r\n return NotImplemented", "def __neq__(self, other): \n return not self == other", "def MissingOperand():\n\n if dec.Asm.Parse_Pointer == 0:\n errors.DoError('missoper', False)\n return True\n else:\n return False", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def missing_link(x):\n return x**2", "def _op_ne(self, left: Any, right: Any) -> BoolOrIter:\n out = self._op_eq(left, right)\n if isinstance(out, (numpy.ndarray, Series)):\n neout = ~out\n # neout[pandas.isna(out)] = numpy.nan\n return neout\n # out is always a numpy.ndarray\n return not out # pragma: no cover", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _builtin_val_neq(a, b, engine=None, **k):\n check_mode((a, b), ['gg'], functor='=\\=', **k)\n a_value = a.compute_value(engine.functions)\n b_value = b.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value != b_value", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != 
(other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant Livetimes\"\n )", "def test_grade_nan(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect an incorrect answer (+ nan) to be marked incorrect\r\n # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)\r\n input_formula = \"10*x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect an correct answer (+ nan) to be marked incorrect\r\n input_formula = \"x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def visit_none_type(self, left: NoneType) -> T:", "def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def less_equal(lhs, rhs):\n return _make.less_equal(lhs, rhs)", "def from_rhs(self, rhs):\n return self._rhs_rules[rhs]", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n else:\n return not result", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def find_missing(src, dest):\n\tmissing = [item for item in src if item not in dest]\n\treturn missing", "def _abs (x):\n\n return x if le(nil,x) else -x", "def resolve_against(self, other, **kwargs):\n if isinstance(other, (Not, Or, Eq)):\n return _coconut_tail_call(other.resolve_against, self, **kwargs)\n elif (self.find_unification)(Not(other).simplify(**kwargs)) is not None:\n return bot\n else:\n return None", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_rightHandArgumentImplementsEquality(self):\n self.assertTrue(Record(1, 2) == EqualToEverything())\n self.assertFalse(Record(1, 2) == EqualToNothing())", "def swap(\n self, src, dest, raise_src=False, raise_dest=False,\n raise_pred=False\n ) -> typing.Optional[Expr]:\n\n if not isinstance(src, int):\n pred = src\n src = None # None for not found, False for duplicated.\n for i, v in enumerate(self.indices):\n if pred(v):\n src = i if src is None else False\n else:\n continue\n if src is None or src is False:\n return _fail(raise_pred)\n\n if src == dest:\n self.decide(dest)\n return _UNITY\n\n for i, j in [(src, raise_src), (dest, raise_dest)]:\n if self.is_decided(i):\n return _fail(j)\n\n indices = self.indices\n src_jm, dest_jm = indices[src], indices[dest]\n # Core swapping.\n indices[src], indices[dest] = dest_jm, src_jm\n # Update bare m dummy mapping.\n src_m, dest_m = src_jm.m_symb, dest_jm.m_symb\n if src_m != dest_m:\n # When src_m and dest_m are 
both None, no need for any treatment.\n # Or when they are the same symbol, the set of appearances of that\n # symbol does not need to be changed as well.\n for m_symb, old_idx, new_idx in [\n (src_m, src, dest),\n (dest_m, dest, src)\n ]:\n if m_symb is None or m_symb not in self._m_dumms:\n continue\n entry = self._m_dumms[m_symb]\n if self._uniq_m:\n assert entry == old_idx\n self._m_dumms[m_symb] = new_idx\n else:\n entry.remove(old_idx) # Key error when not present.\n assert new_idx not in entry\n entry.add(new_idx)\n\n self.decide(dest)\n return _NEG_UNITY ** self._total_j", "def __lt__(self, other):\n if isinstance(other, RationalFrac):\n # NOTE: neg is False when 0 in numer.\n return (self - other).neg\n elif isinstance(other, (int, float, str)):\n return (self - (RationalFrac(other))).neg\n else:\n return NotImplemented", "def testMergeRejectsUnequalNodes():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=3, z=3)\n\n with pytest.raises(TypeError):\n n1.merge_with(n2)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)", "def test_check_y_not_in_domain_link(default_X_y, default_gam):\n X, y = default_X_y\n gam = default_gam\n\n with pytest.raises(ValueError):\n check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)", "def test_adjacent_unpresent(graph_with_edges):\n with pytest.raises(ValueError):\n graph_with_edges.adjacent('Captain Picard', 'Star Wars')", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def iexact(self, other):", "def __ge__(self, other):\r\n return NotImplemented", "def find_missing(expected, existing):\n return set(expected).difference(existing)", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def qual_missing(id_, seq, qual):\r\n return qual is None", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def __ne__(self, other):\n eq = self.__eq__(other)\n if eq is not NotImplemented:\n return not eq\n else:\n return NotImplemented", "def 
ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def merge(self, rhs):\n if self.forwardLabel == rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def __ne__(self, other):\n return not_equal(self, other)", "def try_fold_comparison_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n left = left.expr\n right = right.expr\n\n if op == ast.ops.EQ:\n value = left.value == right.value\n elif op == ast.ops.NE:\n value = left.value != right.value\n elif op == ast.ops.GT:\n value = left.value > right.value\n elif op == ast.ops.GE:\n value = left.value >= right.value\n elif op == ast.ops.LT:\n value = left.value < right.value\n elif op == ast.ops.LE:\n value = left.value <= right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=ctx.schema.get('std::bool')),\n ctx=ctx)", "def implies(A, B):\n\treturn B or not A", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def __ne__(self, other):\n return self.isNot(other)", "def test_merge_with_weird_eq():\r\n\r\n #SCALAR CASE\r\n x = T.constant(numpy.asarray(1), name='x')\r\n y = T.constant(numpy.asarray(1), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]\r\n\r\n #NONSCALAR CASE\r\n # This was created to test TensorConstantSignature\r\n x = T.constant(numpy.ones(5), name='x')\r\n y = T.constant(numpy.ones(5), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]", "def is_neq(lhs, rhs, assumptions=None):\n return fuzzy_not(is_eq(lhs, rhs, assumptions))", "def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result", "def test_disallow_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x.a@-> 5; 5 -0:a-> (2)\n \"1 151 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x.b@-> 6; 6 -0:b-> (2)\n \"1 152 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"b\"}\n assert set(fst.generate(\"b\")) == {\"a\"}\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_require_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@R.x.a@-> 5; 5 -0:a-> (2)\n \"1 161 5 0\",\n \"5 0 97 2 0\",\n # 1 -@R.x.b@-> 6; 6 -0:b-> (2)\n \"1 162 6 0\",\n \"6 
0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"a\"}\n assert set(fst.generate(\"b\")) == {\"b\"}\n assert set(fst.generate(\"c\")) == set()", "def test_notImplementedLessThan(self):\n self.assertEqual(Comparable(1).__lt__(object()), NotImplemented)", "def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df" ]
[ "0.6356365", "0.6314329", "0.56324995", "0.5260674", "0.5232673", "0.51086223", "0.5028781", "0.5027332", "0.49481612", "0.48879817", "0.4887651", "0.48581392", "0.4851787", "0.48394328", "0.48073387", "0.48042664", "0.47770894", "0.47716808", "0.4759484", "0.47486883", "0.47274226", "0.47232836", "0.47153628", "0.4714673", "0.47144112", "0.4707077", "0.4698117", "0.46872255", "0.46520314", "0.4643336", "0.46312854", "0.46165857", "0.46093032", "0.45991904", "0.45927265", "0.45895317", "0.45553607", "0.45550314", "0.45404685", "0.45404685", "0.45312065", "0.45241743", "0.45226714", "0.45226714", "0.45148984", "0.4500773", "0.44851986", "0.44800222", "0.44800064", "0.44798666", "0.44612488", "0.44598353", "0.44582385", "0.44498858", "0.44437742", "0.44426334", "0.44364014", "0.44364014", "0.44333836", "0.44309828", "0.44302157", "0.4428263", "0.44263417", "0.4423512", "0.4422614", "0.4419074", "0.44178027", "0.44175133", "0.44039476", "0.43941382", "0.43907502", "0.43855304", "0.4378287", "0.43776235", "0.43752736", "0.43741813", "0.43685377", "0.43682975", "0.4363379", "0.4361929", "0.43546623", "0.4353992", "0.43534848", "0.4353147", "0.4351677", "0.43496957", "0.43492135", "0.434722", "0.43469942", "0.43469942", "0.43469942", "0.43439764", "0.43405426", "0.43369326", "0.43357158", "0.43336225", "0.43304914", "0.43284062", "0.43277046", "0.43252683", "0.4322795" ]
0.0
-1
Ensure a missing lhs returns rhs.
def test_equal_inputs(self):
        from sosbeacon.utils import get_latest_datetime

        lhs = rhs = datetime(2012, 9, 20, 2, 59)

        result = get_latest_datetime(lhs, rhs)

        self.assertIs(rhs, result)
        self.assertIs(lhs, result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_no_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = None\n rhs = object()\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def isnone(cls, lhs, rhs):\n if rhs:\n return lhs is None\n else:\n return lhs is not None", "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def binop(x, y, op):\n\n if (x is None) and (y is None):\n return None\n\n x = x if (x is not None) else 0\n y = y if (y is not None) else 0\n return op(x,y)", "def __none_left_mult(x, y):\n if x is not None:\n return x * y\n return None", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def nonull(val):\n return val if not pd.isnull(val) else None", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def add_ignore_empty(x, y):\n\n def _ignore(t):\n return t is None or (isinstance(t, tuple) and len(t) == 0)\n\n if _ignore(y):\n return x\n elif _ignore(x):\n return y\n else:\n return x + y", "def nexact(cls, lhs, rhs):\n return lhs != rhs", "def __ne__(self, other):\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0", "def test_notImplementedLessThanEquals(self):\n self.assertEqual(Comparable(1).__le__(object()), NotImplemented)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def xor_constrain(a, b):\n if a and not b:\n return a\n if b and not a:\n return b\n if a and b:\n raise ValueError('xor error: both values cannot be True')\n raise ValueError('xor error: both values cannot be False')", "def _check_undefined(self) -> PossibleResult[T]:\n if self.constructor == Undefined:\n if not self.obj is UNDEFINED:\n raise DeserializeError(\n Undefined, self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def requires_lvalue(self, *args):\n return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def _builtin_neq(arg1, arg2, **kwdargs):\n try:\n unify_value(arg1, arg2, {})\n return False\n except UnifyError:\n return True", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def _check_loss(self, loss):\n assert not np.isnan(loss), \"Model diverged with loss = NaN\"", "def IteratedCompareMixed (lhs, rhs):\n li = iter(lhs)\n ri = iter(rhs)\n while True:\n try:\n (lv, rv) = (next(li), next(ri))\n if lv is None:\n if rv is None:\n 
continue\n return -1\n if rv is None:\n return 1\n if lv == rv:\n continue\n if lv < rv:\n return -1\n return 1\n except StopIteration:\n nl = len(lhs)\n nr = len(rhs)\n if nl < nr:\n return -1\n if nl == nr:\n return 0\n return 1", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])", "def _as_rhs(self):\n raise NotImplementedError", "def __ne__(left, right):\n return (not (left == right))", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def __le__(self, other):\r\n return NotImplemented", "def __neq__(self, other): \n return not self == other", "def MissingOperand():\n\n if dec.Asm.Parse_Pointer == 0:\n errors.DoError('missoper', False)\n return True\n else:\n return False", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def test_retNone1(self):\r\n class retNone(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.vector()]\r\n outputs = [theano.tensor.vector()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz, = grads\r\n pass\r\n a = retNone().make_node()\r\n self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)", "def __ne__(self, other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n if(self.numerator!=other.numerator):\n return True\n else:\n return False", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_missing_for_sorting() -> NoReturn:\n raise NotImplementedError", "def missing_link(x):\n return x**2", "def _op_ne(self, left: Any, right: Any) -> BoolOrIter:\n out = self._op_eq(left, right)\n if isinstance(out, (numpy.ndarray, Series)):\n neout = ~out\n # neout[pandas.isna(out)] = numpy.nan\n return neout\n # out is always a numpy.ndarray\n return not out # pragma: no cover", "def __rrshift__(self, other):\r\n return NotImplemented", "def __rrshift__(self, other):\r\n return NotImplemented", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def _builtin_val_neq(a, b, engine=None, **k):\n check_mode((a, b), ['gg'], functor='=\\=', **k)\n a_value = a.compute_value(engine.functions)\n b_value = b.compute_value(engine.functions)\n if a_value is None or b_value is None:\n return False\n else:\n return a_value != b_value", "def _check_other(self, other):\n # pylint: disable=protected-access\n if not isinstance(other, Spectrum):\n raise RuntimeError(\n \"Tried to binop Spectrum and %s\" % (type(other))\n )\n\n if (self._pot is None) != 
(other._pot is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant POT\"\n )\n\n if (self._lt is None) != (other._lt is None):\n raise RuntimeError(\n \"Tried to binop Spectra with nonconformant Livetimes\"\n )", "def test_grade_nan(self):\r\n\r\n sample_dict = {'x': (1, 2)}\r\n\r\n # Test problem\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=\"1%\",\r\n answer=\"x\")\r\n # Expect an incorrect answer (+ nan) to be marked incorrect\r\n # Right now this evaluates to 'nan' for a given x (Python implementation-dependent)\r\n input_formula = \"10*x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")\r\n # Expect an correct answer (+ nan) to be marked incorrect\r\n input_formula = \"x + 0*1e999\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def visit_none_type(self, left: NoneType) -> T:", "def notnull(obj):\n res = isnull(obj)\n if is_scalar(res):\n return not res\n return ~res", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def less_equal(lhs, rhs):\n return _make.less_equal(lhs, rhs)", "def from_rhs(self, rhs):\n return self._rhs_rules[rhs]", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n else:\n return not result", "def test_getSiblingMissing(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore2.getSiblingObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def find_missing(src, dest):\n\tmissing = [item for item in src if item not in dest]\n\treturn missing", "def _abs (x):\n\n return x if le(nil,x) else -x", "def resolve_against(self, other, **kwargs):\n if isinstance(other, (Not, Or, Eq)):\n return _coconut_tail_call(other.resolve_against, self, **kwargs)\n elif (self.find_unification)(Not(other).simplify(**kwargs)) is not None:\n return bot\n else:\n return None", "def test_neg():\n # Test for negation with scalar Rnode object\n x = Rnode(5.0)\n z = -x\n try:\n assert z.value == -1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_rightHandArgumentImplementsEquality(self):\n self.assertTrue(Record(1, 2) == EqualToEverything())\n self.assertFalse(Record(1, 2) == EqualToNothing())", "def swap(\n self, src, dest, raise_src=False, raise_dest=False,\n raise_pred=False\n ) -> typing.Optional[Expr]:\n\n if not isinstance(src, int):\n pred = src\n src = None # None for not found, False for duplicated.\n for i, v in enumerate(self.indices):\n if pred(v):\n src = i if src is None else False\n else:\n continue\n if src is None or src is False:\n return _fail(raise_pred)\n\n if src == dest:\n self.decide(dest)\n return _UNITY\n\n for i, j in [(src, raise_src), (dest, raise_dest)]:\n if self.is_decided(i):\n return _fail(j)\n\n indices = self.indices\n src_jm, dest_jm = indices[src], indices[dest]\n # Core swapping.\n indices[src], indices[dest] = dest_jm, src_jm\n # Update bare m dummy mapping.\n src_m, dest_m = src_jm.m_symb, dest_jm.m_symb\n if src_m != dest_m:\n # When src_m and dest_m are 
both None, no need for any treatment.\n # Or when they are the same symbol, the set of appearances of that\n # symbol does not need to be changed as well.\n for m_symb, old_idx, new_idx in [\n (src_m, src, dest),\n (dest_m, dest, src)\n ]:\n if m_symb is None or m_symb not in self._m_dumms:\n continue\n entry = self._m_dumms[m_symb]\n if self._uniq_m:\n assert entry == old_idx\n self._m_dumms[m_symb] = new_idx\n else:\n entry.remove(old_idx) # Key error when not present.\n assert new_idx not in entry\n entry.add(new_idx)\n\n self.decide(dest)\n return _NEG_UNITY ** self._total_j", "def __lt__(self, other):\n if isinstance(other, RationalFrac):\n # NOTE: neg is False when 0 in numer.\n return (self - other).neg\n elif isinstance(other, (int, float, str)):\n return (self - (RationalFrac(other))).neg\n else:\n return NotImplemented", "def testMergeRejectsUnequalNodes():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=3, z=3)\n\n with pytest.raises(TypeError):\n n1.merge_with(n2)", "def assert_is_not_none(self, expr, msg=None):\r\n assert expr is not None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def equal(lhs, rhs):\n return _make.equal(lhs, rhs)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)", "def test_check_y_not_in_domain_link(default_X_y, default_gam):\n X, y = default_X_y\n gam = default_gam\n\n with pytest.raises(ValueError):\n check_y(y + .1, default_gam.link, default_gam.distribution, verbose=False)", "def test_adjacent_unpresent(graph_with_edges):\n with pytest.raises(ValueError):\n graph_with_edges.adjacent('Captain Picard', 'Star Wars')", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def iexact(self, other):", "def __ge__(self, other):\r\n return NotImplemented", "def find_missing(expected, existing):\n return set(expected).difference(existing)", "def not_none(item, alt=None):\r\n\r\n return item if item is not None else alt", "def qual_missing(id_, seq, qual):\r\n return qual is None", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def __ne__(self, other):\n eq = self.__eq__(other)\n if eq is not NotImplemented:\n return not eq\n else:\n return NotImplemented", "def 
ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def merge(self, rhs):\n if self.forwardLabel == rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def __ne__(self, other):\n return not_equal(self, other)", "def try_fold_comparison_binop(\n op: ast.ops.Operator, left: irast.Set, right: irast.Set, *,\n ctx: context.ContextLevel) -> typing.Optional[irast.Set]:\n left = left.expr\n right = right.expr\n\n if op == ast.ops.EQ:\n value = left.value == right.value\n elif op == ast.ops.NE:\n value = left.value != right.value\n elif op == ast.ops.GT:\n value = left.value > right.value\n elif op == ast.ops.GE:\n value = left.value >= right.value\n elif op == ast.ops.LT:\n value = left.value < right.value\n elif op == ast.ops.LE:\n value = left.value <= right.value\n else:\n value = None\n\n if value is not None:\n return setgen.ensure_set(\n irast.Constant(value=value, type=ctx.schema.get('std::bool')),\n ctx=ctx)", "def implies(A, B):\n\treturn B or not A", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def __ne__(self, other):\n result = self.__eq__(other)\n if result is NotImplemented:\n return result\n return not result", "def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")", "def __ne__(self, other):\n return self.isNot(other)", "def test_merge_with_weird_eq():\r\n\r\n #SCALAR CASE\r\n x = T.constant(numpy.asarray(1), name='x')\r\n y = T.constant(numpy.asarray(1), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]\r\n\r\n #NONSCALAR CASE\r\n # This was created to test TensorConstantSignature\r\n x = T.constant(numpy.ones(5), name='x')\r\n y = T.constant(numpy.ones(5), name='y')\r\n g = Env([x, y], [x+y])\r\n MergeOptimizer().optimize(g)\r\n\r\n assert len(g.apply_nodes) == 1\r\n node = list(g.apply_nodes)[0]\r\n assert len(node.inputs) == 2\r\n assert node.inputs[0] is node.inputs[1]", "def is_neq(lhs, rhs, assumptions=None):\n return fuzzy_not(is_eq(lhs, rhs, assumptions))", "def not_equals_success_func(target, result):\n if result is None:\n return False\n return target != result", "def test_disallow_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x.a@-> 5; 5 -0:a-> (2)\n \"1 151 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x.b@-> 6; 6 -0:b-> (2)\n \"1 152 6 0\",\n \"6 0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"b\"}\n assert set(fst.generate(\"b\")) == {\"a\"}\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_require_value_with_unify() -> None:\n # Given 'a', this FST will print 'b'\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@R.x.a@-> 5; 5 -0:a-> (2)\n \"1 161 5 0\",\n \"5 0 97 2 0\",\n # 1 -@R.x.b@-> 6; 6 -0:b-> (2)\n \"1 162 6 0\",\n \"6 
0 98 2 0\",\n a_and_b=\"unify\",\n )\n\n assert set(fst.generate(\"a\")) == {\"a\"}\n assert set(fst.generate(\"b\")) == {\"b\"}\n assert set(fst.generate(\"c\")) == set()", "def test_notImplementedLessThan(self):\n self.assertEqual(Comparable(1).__lt__(object()), NotImplemented)", "def test_not_rxx_equivalent(self):\n gate = SwapGate\n with self.assertRaises(QiskitError) as exc:\n TwoQubitControlledUDecomposer(gate)\n self.assertIn(\n \"Equivalent gate needs to take exactly 1 angle parameter.\", exc.exception.message\n )", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df" ]
[ "0.6356365", "0.6314329", "0.56324995", "0.5260674", "0.5232673", "0.51086223", "0.5028781", "0.5027332", "0.49481612", "0.48879817", "0.4887651", "0.48581392", "0.4851787", "0.48394328", "0.48073387", "0.48042664", "0.47770894", "0.47716808", "0.4759484", "0.47486883", "0.47274226", "0.47232836", "0.47153628", "0.4714673", "0.47144112", "0.4707077", "0.4698117", "0.46872255", "0.46520314", "0.4643336", "0.46312854", "0.46165857", "0.46093032", "0.45991904", "0.45927265", "0.45895317", "0.45553607", "0.45550314", "0.45404685", "0.45404685", "0.45312065", "0.45241743", "0.45226714", "0.45226714", "0.45148984", "0.4500773", "0.44851986", "0.44800222", "0.44800064", "0.44798666", "0.44612488", "0.44598353", "0.44582385", "0.44498858", "0.44437742", "0.44426334", "0.44364014", "0.44364014", "0.44333836", "0.44309828", "0.44302157", "0.4428263", "0.44263417", "0.4423512", "0.4422614", "0.4419074", "0.44178027", "0.44175133", "0.44039476", "0.43941382", "0.43907502", "0.43855304", "0.4378287", "0.43776235", "0.43752736", "0.43741813", "0.43685377", "0.43682975", "0.4363379", "0.4361929", "0.43546623", "0.4353992", "0.43534848", "0.4353147", "0.4351677", "0.43496957", "0.43492135", "0.434722", "0.43469942", "0.43469942", "0.43469942", "0.43439764", "0.43405426", "0.43369326", "0.43357158", "0.43336225", "0.43304914", "0.43284062", "0.43277046", "0.43252683", "0.4322795" ]
0.0
-1
Create a heatmap from a numpy array and two lists of labels.
import numpy as np
import matplotlib.pyplot as plt


def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw={}, cbarlabel="", **kwargs):
    """Create a heatmap from a numpy array and two lists of labels."""
    if not ax:
        ax = plt.gca()

    # Plot the heatmap
    im = ax.imshow(data, **kwargs)

    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")

    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)

    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")

    # Turn spines off and create white grid.
    # ax.spines[:].set_visible(False)
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    # ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)

    return im, cbar
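A minimal usage sketch for the function above, assuming numpy and matplotlib are installed; the sample matrix and the row/column labels below are illustrative placeholders rather than values drawn from the record.

import numpy as np
import matplotlib.pyplot as plt

# Illustrative data: a small random matrix with made-up labels.
row_labels = ["row A", "row B", "row C", "row D"]
col_labels = ["c1", "c2", "c3", "c4", "c5", "c6"]
data = np.random.rand(len(row_labels), len(col_labels))

fig, ax = plt.subplots()
# Extra keyword arguments (e.g. cmap) are forwarded to imshow.
im, cbar = heatmap(data, row_labels, col_labels, ax=ax,
                   cmap="viridis", cbarlabel="value [arbitrary units]")
fig.tight_layout()
plt.show()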
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heatmap(data, row_labels, col_labels, ax=None,\r\n cbar_kw={}, cbarlabel=\"\", title = \"Default\", x_title=\" \",y_title=\" \",saveFile = None, **kwargs):", "def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\r\n\r\n if not ax:\r\n ax = plt.gca()\r\n\r\n # Plot the heatmap\r\n im = ax.imshow(data, **kwargs)\r\n\r\n # Create colorbar\r\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\r\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\r\n\r\n # We want to show all ticks...\r\n ax.set_xticks(np.arange(data.shape[1]))\r\n ax.set_yticks(np.arange(data.shape[0]))\r\n # ... and label them with the respective list entries.\r\n ax.set_xticklabels(col_labels)\r\n ax.set_yticklabels(row_labels)\r\n\r\n # Let the horizontal axes labeling appear on top.\r\n ax.tick_params(top=True, bottom=False,\r\n labeltop=True, labelbottom=False)\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Turn spines off and create white grid.\r\n # ax.spines[:].set_visible(False)\r\n\r\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\r\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\r\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\r\n ax.tick_params(which=\"minor\", bottom=False, left=False)\r\n\r\n return im, cbar", "def generate_heatmap(data, labels_dict, file_title, plot_title):\n\n fig = plt.figure()\n ax = sn.heatmap(data,\n linewidths=0.3)\n figure = ax.get_figure()\n\n if labels_dict:\n ax.set_xlabel(labels_dict[\"x\"])\n ax.set_ylabel(labels_dict[\"y\"])\n if plot_title:\n ax.set_title(plot_title)\n\n figure.savefig(file_title)", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n #ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n #ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n #ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def plot_labels(lbl: scipy.ndarray, lbl_count: int) -> None:\n color_map = scipy.rand(lbl_count, 3)\n color_map = matplotlib.colors.ListedColormap(color_map)\n plt.imshow(lbl, cmap=color_map)\n plt.show()", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", vmin = 0, vmax = 100, **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs, vmin = vmin, vmax = vmax)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, fraction=0.0272, pad=0.04, extend='both', ticks=[-100, -50, 0, 50, 100], **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(mat, x_label=None, y_label=None, axes=None,\n title=None, save=False):\n sns.heatmap(mat)\n plt.show()", "def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n plt.title('epsilon')\n plt.ylabel('gamma')\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel=\"\", **kwargs):\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap 绘制热图\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar 创建颜色条\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks... 显示所有的刻度\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries. 用相应的列表条目分别标记行和列\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top. 让水平轴标签显示在顶部\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment. 旋转刻度标签并设置其对齐方式。\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid. 关闭spines 创建白色的网格\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def relabel_with_map_array(image, label_list, measurement_list):\n from skimage.util import map_array\n return map_array(np.asarray(image), np.asarray(label_list), np.array(measurement_list))", "def heatmap(data, row_labels, col_labels, ax=None,\r\n cbar_kw={}, cbarlabel=\"\", **kwargs):\r\n\r\n if not ax:\r\n ax = plt.gca()\r\n\r\n # Plot the heatmap\r\n im = ax.imshow(data, **kwargs)\r\n\r\n # Create colorbar\r\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\r\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\r\n\r\n # We want to show all ticks...\r\n n_ticks = 20\r\n if data.shape[1]>n_ticks :\r\n ax.set_xticks(np.linspace(0 ,data.shape[1], num = n_ticks , dtype=int))\r\n ax.set_yticks(np.linspace(0 ,data.shape[0], num = n_ticks , dtype=int))\r\n ax.set_xticklabels(np.linspace(0 ,data.shape[1], num = n_ticks , dtype=int))\r\n ax.set_yticklabels(np.linspace(0 ,data.shape[0], num = n_ticks , dtype=int))\r\n else:\r\n ax.set_xticks(np.arange(data.shape[1]))\r\n ax.set_yticks(np.arange(data.shape[0]))\r\n # ... 
and label them with the respective list entries.\r\n ax.set_xticklabels(col_labels)\r\n ax.set_yticklabels(row_labels)\r\n\r\n # Let the horizontal axes labeling appear on top.\r\n ax.tick_params(top=True, bottom=False,\r\n labeltop=True, labelbottom=False)\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",rotation_mode=\"anchor\")\r\n\r\n # Turn spines off and create white grid.\r\n for edge, spine in ax.spines.items():\r\n spine.set_visible(False)\r\n\r\n ax.set_xticks(np.arange(data.shape[1]+1)-0.5, minor=True)\r\n ax.set_yticks(np.arange(data.shape[0]+1)-0.5, minor=True)\r\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=0.0005)\r\n ax.tick_params(which=\"minor\", bottom=False, left=False)\r\n\r\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbar_format=\"{x:.0f}\", cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar_formatter = matplotlib.ticker.StrMethodFormatter(cbar_format) \n cbar = ax.figure.colorbar(im, ax=ax, format=cbar_formatter, **cbar_kw)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n ax.set_xlabel(cbarlabel, rotation=0, va=\"baseline\")\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n \n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\n cbar = ax.figure.colorbar(im, ax=ax, cax = cax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n #cbar = plt.colorbar(im, cax=cax)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels, fontdict = font)\n ax.set_yticklabels(row_labels, fontdict = font)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def timeit_heatmap(data, xlabel='xlabel', ylabel='ylabel', **kwargs):\n dataT = {}\n figs = []\n series = kwargs.get('series', (0,1))\n cmap = kwargs.get('cmap', cm.coolwarm)\n for k, v in data.items():\n dataT[k] = zip(*v)\n X, Y, Z = dataT[k][series[0]], dataT[k][series[1]], dataT[k][-1]\n left, right = min(X), max(X)\n bottom, top = min(Y), max(Y)\n extent = [left, right, bottom, top]\n wide, tall = (max(X)-min(X)+1), (max(Y)-min(Y)+1)\n intervalX = max(X) - min(heapq.nlargest(2,set(X)))\n intervalY = max(Y) - min(heapq.nlargest(2,set(Y)))\n if intervalX > 1: \n wide = 1 + wide/intervalX\n else:\n wide = 1\n if intervalY > 1: \n tall = 1 + tall/intervalY\n else: \n tall = 1\n # TODO: BUG: fix so that Z transposes with x & y series reversed\n Z = np.reshape(Z, [wide, tall])\n Z = list(zip(*Z)) # Z is transposed\n Z = [i for i in Z[::-1]] # Z is upside down\n fig, ax = plt.subplots()\n hmap = ax.imshow(Z, extent=extent, cmap=cmap, interpolation='nearest')\n fig.colorbar(hmap).set_label(\"time\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(substitute_titles(k,series))\n figs.append(fig)\n return figs", "def heatmap(values: List[float], ax: mpl.axes.Axes) -> mpl.axes.Axes:\n \n mf = pd.DataFrame(\n np.array(values).reshape(2, 2)\n )\n \n mf.columns, mf.index = ['True', 'False'], ['True', 'False']\n\n sns.heatmap(mf, annot=True, cmap='Blues', fmt='g', ax=ax)\n\n ax.set_xlabel('Predicted')\n ax.set_ylabel('Ground Truth')\n \n return ax", "def plot_heatmap(data, title, xlabel, ylabel):\n hm = plt.pcolor(data, cmap=cm.gray_r, vmin=0, vmax=1)\n\n plt.tick_params(labelsize=20)\n plt.axes().set_aspect('equal')\n\n plt.title(title, fontsize=33)\n plt.xlabel(xlabel, fontsize=30)\n plt.ylabel(ylabel, fontsize=30)\n\n Plotter.show(title)", "def score_heatmap(scores: np.ndarray):\n\n # plot the heatmap\n ax = sns.heatmap(scores.astype(int),\n annot=True,\n fmt='d',\n linewidths=0.5,\n square=True,\n cbar=False,\n cmap=plt.cm.Blues\n )\n\n # set the ticks for the labels\n ax.set_yticklabels(range(1,6))\n ax.set_xticklabels(range(1,11))", "def plot_heatmap(otu_table, row_labels, col_labels, filename='heatmap.pdf',\r\n width=5, height=5, textborder=.25, color_scheme=\"jet\"):\r\n nrow = len(otu_table.ObservationIds)\r\n ncol = len(otu_table.SampleIds)\r\n\r\n # determine appropriate font sizes for tick labels\r\n row_fontsize = get_fontsize(nrow)\r\n col_fontsize = get_fontsize(ncol)\r\n\r\n # create figure and plot heatmap\r\n fig = figure(figsize=(width, height))\r\n my_cmap = get_cmap(color_scheme)\r\n # numpy magic: [:,::-1] actually means fliplr()\r\n #imshow(x[:,::-1],interpolation='nearest', aspect='auto', cmap=my_cmap)\r\n\r\n data = [val for val in otu_table.iterObservationData()]\r\n 
imshow(fliplr(data), interpolation='nearest', aspect='auto', cmap=my_cmap)\r\n ax = fig.axes[0]\r\n\r\n # imshow is offset by .5 for some reason\r\n xlim(-.5, ncol - .5)\r\n ylim(-.5, nrow - .5)\r\n\r\n # add ticklabels to axes\r\n xticks(arange(ncol), col_labels[::-1], fontsize=col_fontsize)\r\n yticks(arange(nrow), row_labels, fontsize=row_fontsize)\r\n\r\n # turn off tick marks\r\n ax.xaxis.set_ticks_position('none')\r\n ax.yaxis.set_ticks_position('none')\r\n\r\n # rotate x ticklabels\r\n for label in ax.xaxis.get_ticklabels():\r\n label.set_rotation(90)\r\n\r\n # add space for tick labels\r\n fig.subplots_adjust(left=textborder, bottom=textborder)\r\n cb = colorbar() # grab the Colorbar instance\r\n # set colorbar tick labels to a reasonable value (normal is large)\r\n for t in cb.ax.get_yticklabels():\r\n t.set_fontsize(5)\r\n fig.savefig(filename)", "def render_heatmap(name,\n data,\n val_label=\"\",\n row_ticks=None,\n col_ticks=None,\n row_labels=None,\n col_labels=None,\n cbar_kw={},\n annotate_format=\"%.2f\",\n font_size=7,\n img_height=None,\n img_width=None,\n dpi=300,\n figsize=(2, 2),\n **kwargs):\n assert len(data.shape) == 2, \"Must be a rank-2 tensor!\"\n if not isinstance(data, np.ndarray):\n array = data.cpu().numpy()\n else:\n array = data\n fig, ax = plt.subplots(figsize=figsize)\n im, _ = _heatmap(\n array,\n row_ticks,\n col_ticks,\n row_labels,\n col_labels,\n ax,\n cbar_kw=cbar_kw,\n cbarlabel=val_label,\n **kwargs)\n if annotate_format != '':\n _annotate_heatmap(im, valfmt=annotate_format, size=font_size)\n return _convert_to_image(name, fig, dpi, img_height, img_width)", "def heat_map(path):\r\n x, y= np.loadtxt(fname=path, delimiter='\\t',dtype=int,\r\n usecols = (1,2), skiprows=100, unpack = True)\r\n\r\n fig, (ax,ax2) = plt.subplots(nrows=2, sharex=True, figsize=(20,10))\r\n\r\n extent = [x[0]-(x[1]-x[0])/2, x[-1]+(x[1]-x[0])/2,0,1]\r\n ax.imshow(y[np.newaxis,:], cmap=\"plasma\", aspect=\"auto\", extent=extent)\r\n ax2.plot(x,y)\r\n plt.tight_layout()\r\n return plt.show()", "def heatmap2d(matrix, title='Heatmap', ylabel='', xlabel='', caption='',\n color_min=None, color_max=None, out_file=None, line_indices=None,\n line_color='r', line_color_other='k', xticks=None, yticks=None):\n if isinstance(matrix, torch.Tensor):\n matrix = matrix.numpy()\n if isinstance(matrix, list):\n matrix = np.array(matrix)\n if line_indices is None:\n line_indices = {}\n\n full_xlabel = _add_caption(xlabel, caption)\n\n #rc('text', usetex=True)\n plt.imshow(matrix, cmap='viridis')\n\n ax = plt.gca()\n\n if xticks:\n ax.set_xticks(np.arange(len(xticks)))\n ax.set_xticklabels(xticks)\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n print(xticks)\n if yticks:\n ax.set_yticks(np.arange(len(yticks)))\n ax.set_yticklabels(yticks)\n print(yticks)\n\n # Add color limits\n\n plt.colorbar()\n plt.clim(color_min, color_max)\n plt.title(title)\n plt.xlabel(full_xlabel)\n plt.ylabel(ylabel)\n\n # Explicitly set x and y limits (adding lines will extend the limits if this\n # is not done)\n plt.ylim(( len(matrix), 0))\n plt.xlim((0, len(matrix[0])))\n\n # Add horizontal and vertical lines\n for key in line_indices:\n list_indices=line_indices[key]\n if key=='h3':\n for idx in list_indices:\n plt.vlines(idx - 0.5, ymin=0, ymax=len(matrix[0]), color=line_color)\n plt.hlines(idx - 0.5, xmin=0, xmax=len(matrix), color=line_color)\n else:\n for idx in list_indices:\n plt.vlines(idx - 0.5, ymin=0, ymax=len(matrix[0]), color=line_color_other)\n plt.hlines(idx 
- 0.5, xmin=0, xmax=len(matrix), color=line_color_other)\n\n # Output to file or to screen\n if out_file is not None:\n plt.savefig(out_file)\n else:\n plt.show()\n # rc('text', usetex=False)\n\n plt.close()", "def _heatmap(data,\n row_ticks=None,\n col_ticks=None,\n row_labels=None,\n col_labels=None,\n ax=None,\n cbar_kw={},\n cbarlabel=\"\",\n **kwargs):\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n if col_ticks is None:\n # show all the ticks by default\n col_ticks = np.arange(data.shape[1] + 1) - .5\n\n ax.set_xticks(col_ticks, minor=True)\n\n if row_ticks is None:\n # show all the ticks by default\n row_ticks = np.arange(data.shape[0] + 1) - .5\n\n ax.set_yticks(row_ticks, minor=True)\n\n # ... and label them with the respective list entries.\n if col_labels is not None:\n assert len(col_ticks) == len(col_labels), (\n \"'col_ticks' should have the \"\n \"same length as 'col_labels'\")\n ax.set_xticklabels(col_labels)\n\n if row_labels is not None:\n assert len(row_ticks) == len(row_labels), (\n \"'row_ticks' should have the \"\n \"same length as 'row_labels'\")\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(\n ax.get_xticklabels(), rotation=-30, ha=\"right\", rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n ax.spines[:].set_visible(False)\n\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar", "def generateHeatmap(title, unit, labels, sizeValues, xAxisName, legendPos, timeUnit):\n fig = preparePlot(title)\n ax = fig.axes[0]\n impls = sorted(list(sizeValues.keys()), key=cmp_to_key(compareFn))\n yposns = [val for (discard, val) in [extractDigits(impl) for impl in impls]]\n (yAxisName, discard) = extractDigits(impls[0])\n data = [sizeValues[k] for k in impls]\n nonesToNans(data)\n\n if False:\n print(\n \"Title: \",\n title,\n \"\\nunit: \",\n unit,\n \"\\nlabels:\",\n labels,\n \"\\nsizeValues: \",\n sizeValues,\n )\n print(\"impls: \", impls)\n print(\"yAxisName: \", yAxisName)\n print(\"unit: \", unit)\n print(\"timeUnit: \", timeUnit)\n print(\"data: \", data)\n\n # Do most of the work!\n im = ax.imshow(data, cmap=plt.get_cmap(\"plasma\"))\n ax.set_xlabel(xAxisName)\n ax.set_ylabel(yAxisName)\n # We want to show appropriate ticks\n # ... 
and label them with the respective list entries\n (labels, majorTicks, minorTicks) = generateLabelsTicks(labels)\n # print (\"labels: \", labels, \"\\nmajorTicks: \",majorTicks,\"\\nminorTicks: \",minorTicks)\n ax.set_xticks(majorTicks)\n ax.set_xticklabels(labels)\n if minorTicks:\n ax.set_xticks(minorTicks, minor=True)\n\n (labels, majorTicks, minorTicks) = generateLabelsTicks(yposns)\n ax.set_yticks(majorTicks)\n ax.set_yticklabels(labels)\n if minorTicks:\n ax.set_yticks(minorTicks, minor=True)\n\n # Add a colorbar\n cBar = plt.colorbar(im)\n finalisePlot(cBar.ax, title, None, fig, \"_map\", timeUnit)", "def plot_heatmap(model_dir, name, features, labels, num_classes):\n features_sort, _ = utils.sort_dataset(features, labels, \n classes=num_classes, stack=False)\n features_sort_ = np.vstack(features_sort)\n sim_mat = np.abs(features_sort_ @ features_sort_.T)\n\n # plt.rc('text', usetex=False)\n # plt.rcParams['font.family'] = 'serif'\n # plt.rcParams['font.serif'] = ['Times New Roman'] #+ plt.rcParams['font.serif']\n\n fig, ax = plt.subplots(figsize=(7, 5), sharey=True, sharex=True)\n im = ax.imshow(sim_mat, cmap='Blues')\n fig.colorbar(im, pad=0.02, drawedges=0, ticks=[0, 0.5, 1])\n ax.set_xticks(np.linspace(0, len(labels), num_classes+1))\n ax.set_yticks(np.linspace(0, len(labels), num_classes+1))\n [tick.label.set_fontsize(10) for tick in ax.xaxis.get_major_ticks()] \n [tick.label.set_fontsize(10) for tick in ax.yaxis.get_major_ticks()]\n fig.tight_layout()\n\n save_dir = os.path.join(model_dir, 'figures', 'heatmaps')\n os.makedirs(save_dir, exist_ok=True)\n file_name = os.path.join(save_dir, f\"{name}.png\")\n fig.savefig(file_name)\n print(\"Plot saved to: {}\".format(file_name))\n plt.close()", "def get_displayable_heatmap(array, # type: thelper.typedefs.ArrayType\n convert_rgb=True, # type: Optional[bool]\n ): # type: (...) 
-> thelper.typedefs.ArrayType\n if array.ndim != 2:\n array = np.squeeze(array)\n if array.ndim != 2:\n raise AssertionError(\"indexing should return a pre-squeezed array\")\n array_normalized = np.empty_like(array, dtype=np.uint8).copy() # copy needed here due to ocv 3.3 bug\n cv.normalize(array, array_normalized, 0, 255, cv.NORM_MINMAX, dtype=cv.CV_8U)\n heatmap = cv.applyColorMap(array_normalized, cv.COLORMAP_JET)\n if convert_rgb:\n heatmap = cv.cvtColor(heatmap, cv.COLOR_BGR2RGB)\n return heatmap", "def annotate_heatmap(X, ax, xlabels, ylabels, x_rot = 90, y_rot = 0, decimals = 1, color = \"w\"):\n\n ax.set_xticks(np.arange(0, len(xlabels), 1));\n ax.set_yticks(np.arange(0, len(ylabels), 1));\n\n ax.set_xticklabels(labels = xlabels, rotation = x_rot, fontsize = 'xx-small')\n ax.set_yticklabels(labels = ylabels, rotation = y_rot, fontsize = 'xx-small')\n\n # Loop over data dimensions and create text annotations.\n for i in range(len(ylabels)):\n for j in range(len(xlabels)):\n \n if (decimals == 0):\n text = ax.text(j, i, '{:.0f}'.format(X[i,j]), ha=\"center\", va=\"center\", color=color)\n if (decimals == 1):\n text = ax.text(j, i, '{:.1f}'.format(X[i,j]), ha=\"center\", va=\"center\", color=color)\n if (decimals == 2):\n text = ax.text(j, i, '{:.2f}'.format(X[i,j]), ha=\"center\", va=\"center\", color=color)\n return ax", "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap", "def heat_plot(matrix, filename, xTicks, yTicks, xLabel='X', yLabel='Y'):\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tcax = ax.matshow(matrix, vmin=0, vmax=1)\n\tfig.colorbar(cax)\n\tticks = np.arange(0, matrix.shape[0], 1)\n\tax.set_xticks(ticks)\n\tax.set_yticks(ticks)\n\tax.set_xticklabels(xTicks)\n\tax.set_yticklabels(yTicks)\n\tax.set_xlabel(xLabel)\n\tax.set_ylabel(yLabel)\n\tplt.savefig(filename)\n\tplt.close()", "def heat_matrix (m, caption, ticks_labels_x, ticks_labels_y, colormap):\n\n plt.matshow (m, fignum = 0, aspect = 'auto', cmap = colormap[0], norm = colormap[1])\n plt.colorbar ()\n\n plt.xticks (ticks_labels_x[0], ticks_labels_x[1], rotation='vertical')\n plt.yticks (ticks_labels_y[0], ticks_labels_y[1])\n axes = plt.gca ()\n axes.tick_params (direction = 'out', pad = 5)\n\n plt.title (caption, y = 20.0)", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def heatMapAxes(data, dims=[0.1, 0.1, 0.7, 0.7], colors=pylab.cm.autumn, columns=None, rows=None, protColors=None, cIndex=None, fig=None, colorBar=False, axData=None):\r\n if fig is None:\r\n fig = pylab.figure()\r\n if axData is None:\r\n axData = fig.add_axes(dims)\r\n for i in range(len(columns)):\r\n axData.text(i, -0.5 , ' '+str(columns[i]), rotation=270, verticalalignment=\"top\", horizontalalignment=\"center\", fontsize=12)\r\n if protColors == None:\r\n for i in range(len(rows)):\r\n axData.text(-0.75, i, ' '+str(rows[i]), verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=12)\r\n else:\r\n for i in 
range(len(rows)):\r\n axData.text(-0.75, i, ' '+str(rows[i]), verticalalignment=\"center\", horizontalalignment=\"right\", fontsize=12, color=cIndex(float(protColors[i])/(protColors.max()+1)))\r\n small = data.min()\r\n big = data.max()\r\n if math.fabs(small) > math.fabs(big):\r\n big = 0-small\r\n else:\r\n small = 0-big\r\n masked_array = numpy.ma.array (data, mask=numpy.isnan(data))\r\n colors.set_bad('grey',1.)\r\n figData = axData.imshow(masked_array, interpolation='nearest', cmap=colors, aspect='auto', origin='lower')\r\n if colorBar:\r\n fig.colorbar(figData, ax=axData, ticks=[0, 0.25, 0.50, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], pad=0.01, extend='neither')\r\n axData.set_xticks([])\r\n axData.set_yticks([])\r\n return figData", "def heatmap(filename, data):\n\n fig, ax = ppl.subplots(1)\n ppl.pcolormesh(fig, ax, data, vmin=-0.0016, vmax=0.0016)\n fig.savefig(filename + \".png\")", "def create_heat_map(source, response, max_keywords, start_year, interval):\n # Convert source dictionary, which is organized by keywords, to source_by_year dictionary\n # which is organized by year\n source_by_year = dict_by_year(source)\n # Same conversion for response dictionary\n response_by_year = dict_by_year(response)\n\n # Initialize heat map array\n heat_map = np.zeros((max_keywords, interval))\n\n # get list of sorted words from source. The intensity of these words will be\n # measured in the resonse dictionary and returned in the heat map\n sorted_words = sort_dict_keys_by_values(source_by_year[start_year])\n\n # c= column, r=row in heat map\n for c in range(interval):\n year = start_year + c\n for r in range(max_keywords):\n # sorted_words might not have as many keywords as max_keywords\n # if not leave it as zero (initialized above)\n if r < len(sorted_words):\n word = sorted_words[r]\n try:\n value = response_by_year[year][word]\n except KeyError: # The keyword in sorted_words may not exist\n value = 0 # in the response keywords for year\n finally:\n heat_map[r, c] = value\n return heat_map, sorted_words", "def create_heatmap(num_maps, height, width, all_joints, sigma, stride):\n heatmap = np.zeros((height, width, num_maps), dtype=np.float64)\n\n for joints in all_joints:\n for plane_idx, joint in enumerate(joints):\n if joint:\n _put_heatmap_on_plane(heatmap, plane_idx, joint, sigma, height, width, stride)\n\n # background\n heatmap[:, :, -1] = np.clip(1.0 - np.amax(heatmap, axis=2), 0.0, 1.0)\n\n return heatmap", "def plot_heatmap(fig, ax, data,\n xaxislabel=None, yaxislabel=None,\n xticklabels=None, yticklabels=None,\n title=None, grid=True,\n values_in_cells=True, round_values_in_cells=2,\n legend=False,\n fontsize_axislabel=None,\n fontsize_axisticks=None,\n fontsize_cell_values=None):\n\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n if data.ndim != 2:\n raise ValueError('`data` must be a 2D matrix/array')\n\n # draw basic heatmap\n cax = ax.matshow(data)\n\n # draw legend\n if legend:\n fig.colorbar(cax)\n\n # set title\n if title:\n ax.set_title(title, y=1.25)\n\n n_rows, n_cols = data.shape\n\n # draw values in cells\n if values_in_cells:\n textcol_thresh = data.min() + (data.max() - data.min()) / 2\n x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))\n for x, y in zip(x_indices.flatten(), y_indices.flatten()):\n val = data[y, x]\n # lower values get white text color for better visibility\n textcol = 'white' if val < textcol_thresh else 'black'\n disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val\n 
ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)\n\n # customize axes\n if xaxislabel:\n ax.set_xlabel(xaxislabel)\n if yaxislabel:\n ax.set_ylabel(yaxislabel)\n\n if fontsize_axislabel:\n for item in (ax.xaxis.label, ax.yaxis.label):\n item.set_fontsize(fontsize_axislabel)\n\n ax.set_xticks(np.arange(0, n_cols))\n ax.set_yticks(np.arange(0, n_rows))\n\n if xticklabels is not None:\n ax.set_xticklabels(xticklabels, rotation=45, ha='left')\n if yticklabels is not None:\n ax.set_yticklabels(yticklabels)\n\n if fontsize_axisticks:\n for label in (ax.get_xticklabels() + ax.get_yticklabels()):\n label.set_fontsize(fontsize_axisticks)\n\n # gridlines based on minor ticks\n if grid:\n ax.set_xticks(np.arange(-.5, n_cols), minor=True)\n ax.set_yticks(np.arange(-.5, n_rows), minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=1)\n\n return fig, ax", "def _cmd_heatmap(args):\n cnarrs = []\n for fname in args.filenames:\n cnarr = read_cna(fname)\n if args.adjust_xy:\n is_sample_female = verify_sample_sex(\n cnarr, args.sample_sex, args.male_reference, args.diploid_parx_genome\n )\n cnarr = cnarr.shift_xx(args.male_reference, is_sample_female, args.diploid_parx_genome)\n cnarrs.append(cnarr)\n heatmap.do_heatmap(\n cnarrs,\n args.chromosome,\n args.desaturate,\n args.by_bin,\n args.delim_sampl,\n args.vertical,\n args.title,\n )\n if args.output:\n oformat = os.path.splitext(args.output)[-1].replace(\".\", \"\")\n pyplot.savefig(args.output, format=oformat, bbox_inches=\"tight\")\n logging.info(\"Wrote %s\", args.output)\n else:\n pyplot.show()", "def np_to_belief(np_array,labels):\n return dict((l,np_array[0,i]) for i,l in enumerate(labels))", "def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result", "def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result", "def create_fig_2d(self, data_array_2d, output_fn='', xlabel='', ylabel='', title=''):", "def heatmap(df, cmap ='RdBu' ):\n\n # TODO: mpld3 does not display axis labels properly\n\n # TODO: Replace with an interactive plot, see bokeh:\n # http://bokeh.pydata.org/docs/gallery/les_mis.html\n\n fig, ax = plt.subplots()\n data = df.as_matrix()\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n\n ax.pcolor(data, cmap = cmap)\n ax.set_xticks(np.arange(data.shape[1])+0.5, minor = False)\n ax.set_xticklabels(df.columns)\n \n ax.set_yticks(np.arange(data.shape[0])+0.5, minor = False)\n ax.set_yticklabels(df.index)\n ax.invert_yaxis()\n ax.xaxis.tick_top()\n\n return fig", "def plot_heatmap(matrix, path, xlabel=None, ylabel=None):\n plt.close('all')\n df_cm = pd.DataFrame(matrix)\n _ = plt.figure(figsize=(10, 7))\n heatmap = sns.heatmap(df_cm)\n if xlabel: plt.xlabel(xlabel)\n if ylabel: plt.ylabel(ylabel)\n plt.tight_layout()\n make_dir(images_dir)\n plt.savefig(path)", "def heatmap2d(data, valueL='lastrow', color='cool', size=12, marker='o',alpha=0.4,\n save=False, savepath='./' ):\n from pandas.plotting import scatter_matrix\n data = np.array(data)\n if valueL=='lastrow':\n valueL=data[:, -1]\n data = np.delete(data, obj=-1, axis=1)\n\n normalizedValueL = list( (valueL - min(valueL)) / (max(valueL) - min(valueL)) )\n\n if color=='hot':\n colors = plt.cm.hot_r(normalizedValueL)\n # For color bar display\n colmap = 
plt.cm.ScalarMappable(cmap=plt.cm.hot_r)\n elif color=='cool':\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n elif color=='hsv':\n colors = plt.cm.hsv_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hsv_r)\n elif color=='jet':\n colors = plt.cm.jet_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.jet_r)\n elif color=='gray':\n colors = plt.cm.gray_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.gray_r)\n elif color=='spring':\n colors = plt.cm.spring_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.spring_r)\n elif color=='summer':\n colors = plt.cm.summer_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.summer_r)\n elif color=='autumn':\n colors = plt.cm.autumn_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.autumn_r)\n elif color=='winter':\n colors = plt.cm.winter_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.winter_r)\n else:\n print('Since there is no color, it will be the default cool')\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n\n colmap.set_array(valueL)\n\n plt.figure()\n\n ax_matrix = scatter_matrix(pd.DataFrame(data), c=colors, s=size, marker=marker, alpha=alpha)\n\n # For color bar display\n plt.colorbar(colmap, ax=ax_matrix)\n if save==True:\n date = datetime.datetime.now()\n plt.savefig(savepath+'scatter_matrix_'+str(date.year)+'_'+ str(date.month)+ \\\n '_'+str(date.day)+'_'+str(date.hour)+'_'+ \\\n str(date.minute)+'_'+str(date.second), dpi=150)\n plt.show()", "def heatmap(self, *args, **kwargs):\n obj = self.pcolormesh(*args, **kwargs)\n xlocator, ylocator = None, None\n if hasattr(obj, '_coordinates'):\n coords = obj._coordinates\n coords = (coords[1:, ...] + coords[:-1, ...]) / 2\n coords = (coords[:, 1:, :] + coords[:, :-1, :]) / 2\n xlocator, ylocator = coords[0, :, 0], coords[:, 0, 1]\n self.format(\n xgrid=False, ygrid=False, xtickminor=False, ytickminor=False,\n xlocator=xlocator, ylocator=ylocator,\n )\n return obj", "def plot_heatmap_overtime(mat, subplot, titlestr,\n ylabels=[], xlabels=[],\n ax=None,\n show_y_labels=True, show_x_labels=False,\n indicecolors=[], colors=[],\n sharey=None,\n fontsize=FiguresConfig.LARGE_FONT_SIZE,\n cbarlabel=\"\",\n cmapname='inferno'):\n assert len(indicecolors) == len(colors)\n\n if ax is None:\n ax = plt.subplot(subplot, sharey=sharey) # initialize ax\n # set title\n ax.set_title(titlestr, fontsize=fontsize)\n\n # get the size of the matrix to plot\n mat_size = mat.shape[0]\n time_size = mat.shape[1]\n\n # set the yticks & color\n y_ticks = np.arange(mat_size).astype(int)\n\n # plot the heatmap\n # cmap = plt.set_cmap(cmapname)\n if cmapname == 'OrRd':\n bmap = brewer2mpl.get_map(\"OrRd\", 'Sequential', 9, reverse=False)\n cmap = bmap.mpl_colormap\n elif cmapname == 'inferno':\n cmap = 'inferno'\n else:\n cmap = cmapname\n\n # cmap = 'viridis'\n img = ax.imshow(mat,\n origin='lower',\n cmap=cmap,\n aspect='auto',\n interpolation='nearest',\n alpha=0.3,\n )\n # set a grid on the plot\n ax.grid(True, color='grey')\n\n # set x ticks and ylabels\n if show_x_labels:\n # set the xticks & color\n x_ticks = np.array(\n np.arange(0, time_size, time_size / 10), dtype=np.int32)\n x_color = 'k'\n\n ax.set_xticks(x_ticks)\n ax.set_xticklabels(xlabels[x_ticks])\n\n # set y ticks and ylabels\n if show_y_labels:\n # get the ylabbels\n region_labels = np.array(\n [\"%d. 
%s\" % l for l in zip(range(mat_size), ylabels)])\n # region_labels = np.array(ylabels)\n\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(region_labels, fontsize=fontsize / 1.5)\n\n # # check if there was only one color set\n ticklabels = ax.get_yticklabels(minor=False)\n\n # set colors based on lists passed in\n for inds, color in zip(indicecolors, colors):\n for idx in inds:\n ticklabels[idx].set_color(color)\n ax.set_yticklabels(ticklabels)\n else:\n ax.set_yticklabels([])\n\n # set tick ylabels and markers along the heatmap x/y axis\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n\n # format the object correctly\n ax.autoscale(tight=True)\n # make a color bar\n cbar, cax1 = BasePlotter.set_colorbar(BasePlotter, img, ax, cbarlabel)\n cbar.set_label(cbarlabel, rotation=270,\n fontsize=fontsize, labelpad=60)\n cax1.tick_params(labelsize=fontsize)\n return ax", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None):\n ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, \\\n fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: \"annot\" not \"annote\"\n bottom, top = ax.get_ylim()\n ax.set_ylim(bottom + 0.5, top - 0.5)\n if title:\n ax.set_title(title)\n if xlabel:\n ax.set_xlabel(xlabel)\n if ylabel:\n ax.set_ylabel(ylabel)", "def map_label_colors(array, ignore_vals=[0]):\n colset = [(166, 206, 227),\n (31, 120, 180),\n (178, 223, 138),\n (51, 160, 44),\n (251, 154, 153),\n (227, 26, 28),\n (253, 191, 111),\n (255, 127, 0),\n (202, 178, 214),\n (106, 61, 154),\n (255, 255, 153),\n (177, 89, 40)]\n levels = np.unique(array)\n levels = [l for l in levels if l not in ignore_vals]\n if len(levels) == 0:\n return\n if len(levels) == 1:\n return({levels[0]: colset[0]})\n step = len(colset) / (len(levels) - 1)\n\n col_idx = np.arange(0, len(colset), step)\n colors = {}\n for idx in range(len(levels)):\n colors[levels[idx]] = colset[col_idx[idx]]\n return colors", "def label_heatmap(\n dfi, color_pal, \n dfi_labels,rc_labels, dfi_pal,rc_pal, \n df_legend_position=(0, 0.4), rc_legend_position=(0, 0.57),\n label_column=\"subtype\",\n continous=False, cbar_location=(0.15,0.4,0.02,0.1),\n r_c=False, c_c=False, y_t=False, x_t=False, \n show_tick=False, tick_l=None,\n col_name=\"Protein\", row_name=\"Sample\",\n dfi_legend_title=\"Protein State\",\n rc_legend_title=\"Subtype\", \n figure_name=\"Test.png\", dp=600):\n \n # set overall font style\n plt.rc('font',family='Times New Roman') \n\n # set heatmap color panel\n dfi_lut = dict(zip(dfi_labels, dfi_pal)) # one by one\n\n # set row_color panel, this two line is for tcga paper\n# labs = ['Atypical','Basal','Classical','Mesenchymal']\n# rc_lut = dict(zip(labs, rc_pal)) # one by one \n rc_lut = dict(zip(rc_labels.unique(), rc_pal)) # one by one \n rc_colors = rc_labels.map(rc_lut) # lut to all labels\n\n # plot step\n g = sns.clustermap(\n dfi.drop(label_column, axis=1), figsize=(1.8, 1.8),\n row_cluster=r_c, col_cluster=c_c,\n yticklabels=y_t, xticklabels=x_t,\n row_colors=[rc_colors], \n # Add colored class labels using data frame 
created from node and network colors\n cmap=color_pal) # Make the plot look better when many rows/cols\n\n ax0 = g.ax_heatmap\n ax0.set_xlabel(col_name, fontsize=10)\n ax0.set_ylabel(row_name, fontsize=10)\n\n # show some special gene\n if show_tick == True: \n if c_c == False:\n b = list(dfi.columns)\n else:\n b = list(dfi.iloc[:, g.dendrogram_col.reordered_ind].columns)\n print(b)\n\n c = set(b) & set(tick_l)\n d = [b.index(ele) for ele in c]\n ax0.set_xticks(d)\n ax0.set_xticklabels(c, rotation=90, fontsize=6)\n \n ax1 = g.cax\n\n # set legend of heatmap if the data is discrete data, continous data legend in ax4\n if continous==False:\n for label in dfi_labels: \n ax0.bar(0, 0, color=dfi_lut[label], label=label, linewidth=0)\n ax0_legend = ax0.legend(\n loc=\"center\", ncol=1, \n bbox_transform=plt.gcf().transFigure, bbox_to_anchor=df_legend_position, \n prop={'size': 6})\n ax0_legend.set_title(dfi_legend_title, prop={'size':6})\n ax1.set_visible(False)\n else:\n ax1.set_visible(True)\n ax1.set_title(\"Expression\", fontsize=6)\n min_v = np.min(np.min(dfi.drop(label_column, axis=1)))\n max_v = np.max(np.max(dfi.drop(label_column, axis=1)))\n ax1.yaxis.set_ticks([min_v, (min_v + max_v)/2, max_v])\n ax1.yaxis.set_ticklabels([\"Low\", \"Normal\", \"High\"], fontsize=6)\n ax1.set_position(cbar_location) \n \n # set legend of row color bars\n ax2 = g.ax_row_colors\n for label in rc_labels.unique():\n# for label in labs:\n ax2.bar(0, 0, color=rc_lut[label], label=label, linewidth=0)\n ax2_legend = ax2.legend(\n loc=\"center\", ncol=1, \n bbox_transform=plt.gcf().transFigure, bbox_to_anchor=rc_legend_position,\n prop={'size': 6})\n ax2_legend.set_title(rc_legend_title, prop={'size':6})\n\n ax3 = g.ax_row_dendrogram\n ax3.set_visible(False)\n\n ax4 = g.ax_col_dendrogram\n ax4.set_visible(False)\n\n g.savefig(figure_name, dpi=dp)", "def to_multi_label_matrix(target_labels: List[List[str]], label_names: List[str]) -> np.ndarray:\n def map_multi_label_line(line_labels: List[str]) -> List[int]:\n return [1 if label in line_labels else 0 for label in label_names]\n\n return np.array(list(map(map_multi_label_line, target_labels)))", "def recreate_image(codebook, labels, w, h, d):\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def label_to_mask(labels):\n # get the image size\n h, w = labels.shape\n\n # build a color to label map\n idx_to_color = {}\n for label in class_info:\n idx_to_color[class_info[label].id] = class_info[label].color\n\n # generate label matrix\n mask = np.zeros((h, w, 3), dtype=np.uint8)\n for y in range(h):\n for x in range(w):\n id = labels[y, x]\n r, g, b = idx_to_color[id]\n mask[y, x] = np.array([b, g, r])\n\n return mask", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def confusion_matrix_heatmap(y_test, preds, classification_labels):\n labels = list(set(y_test))\n long_labels = [ll + \" (\" + str(l) + \")\" for ll, l\n in zip(classification_labels, labels)]\n cm = confusion_matrix(y_test, preds, labels=labels)\n fig = plt.figure(figsize=(20, 20))\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm)\n plt.title('Confusion matrix of the classifier')\n fig.colorbar(cax)\n ax.set_xticks(np.arange(len(labels)))\n 
ax.set_yticks(np.arange(len(labels)))\n ax.set_xticklabels(labels, rotation=45)\n ax.set_yticklabels(long_labels)\n\n for i in range(len(cm)):\n for j in range(len(cm)):\n text = ax.text(j, i, cm[i, j],\n ha=\"center\", va=\"center\", color=\"w\")\n\n plt.xlabel('Predicted')\n plt.ylabel('True')\n # fig.tight_layout()\n plt.show()", "def add_1d_heatmaps(self,\n maps: dict,\n background: np.ndarray,\n map2d_size: Union[tuple, list],\n K: int,\n interval: int = 10):\n y_startpoint, x_startpoint = [int(1.1*map2d_size[1]),\n int(0.1*map2d_size[0])],\\\n [int(0.1*map2d_size[1]),\n int(1.1*map2d_size[0])]\n x_startpoint[1] += interval * 2\n y_startpoint[0] += interval * 2\n add = interval + 10\n for i in range(K):\n self.image_cover(background, maps['x'][i], x_startpoint[0],\n x_startpoint[1])\n cv.putText(background, str(i),\n (x_startpoint[0] - 30, x_startpoint[1] + 10),\n cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n self.image_cover(background, maps['y'][i], y_startpoint[0],\n y_startpoint[1])\n cv.putText(background, str(i),\n (y_startpoint[0], y_startpoint[1] - 5),\n cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n x_startpoint[1] += add\n y_startpoint[0] += add\n return background[:x_startpoint[1] + y_startpoint[1] +\n 1, :y_startpoint[0] + x_startpoint[0] + 1]", "def plot_heatmap(mi, output):\n\tfig = plt.figure()\n\tdata = np.array(mi)\n\tfig, ax = plt.subplots()\n\theatmap = ax.pcolor(data, cmap=plt.cm.jet)\n\n\tax.invert_yaxis()\n\tax.xaxis.tick_top()\n\n\tax.set_xlabel('Seq 2')\n\tax.set_ylabel('Seq 1')\n\tax.xaxis.set_label_position('top')\n\n\tax.set_xlim(0, len(mi[0]))\n\tax.set_ylim(len(mi), 0)\n\n\txmajor_ticks = np.arange(0, len(mi[1]), 20)\n\txminor_ticks = np.arange(0, len(mi[1]), 1)\n\n\tymajor_ticks = np.arange(0, len(mi), 20)\n\tyminor_ticks = np.arange(0, len(mi), 1)\n\n\tax.tick_params(axis = 'both', which = 'major', labelsize = 5)\n\tax.tick_params(axis = 'both', which = 'minor', labelsize = 0)\n\n\tax.set_xticks(xmajor_ticks)\n\tax.set_xticks(xminor_ticks, minor = True)\n\tax.set_yticks(ymajor_ticks)\n\tax.set_yticks(yminor_ticks, minor = True)\n\n\tax.tick_params(which = 'both', direction = 'out')\n\n\tplt.xticks(rotation=90)\n\n\tcb = plt.colorbar(heatmap)\n\tcb.set_label('MI value')\n\n\tfig.savefig(output, dpi = 700)", "def create_heatmap(df):\n\n fig = go.Figure(data=go.Heatmap(\n z=df.values.tolist(),\n x=df.columns,\n #y=[classifier for classifier in df.index.values.tolist()],\n y = df.index.values.tolist(),\n hoverongaps = False,\n xgap = 3,\n ygap = 3,\n colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'], [0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']]\n ),\n )\n return fig", "def heatmap(island_results):\n kart_herb = []\n kart_carn = []\n for row in island_results:\n h_row = []\n c_row = []\n for cell in row:\n h_row.append(cell[\"herbivores\"])\n c_row.append(cell[\"carnivores\"])\n kart_herb.append(h_row)\n kart_carn.append(c_row)\n return kart_herb, kart_carn", "def make_heatmap(self):\n\n self.get_selected_categories_and_codes()\n codes = deepcopy(self.codes)\n if len(codes) > 40:\n codes = codes[:40]\n Message(self.app, _(\"Too many codes\"), _(\"Too many codes for display. 
Restricted to 40\")).exec()\n # Filters\n heatmap_type = self.ui.comboBox_heatmap.currentText()\n if heatmap_type == \"\":\n return\n title = heatmap_type + \" \" + _(\"Heatmap\")\n self.get_selected_categories_and_codes()\n y_labels = []\n for c in codes:\n y_labels.append(c['name'])\n category = self.ui.comboBox_category.currentText()\n self.ui.lineEdit_filter.setText(\"\")\n self.ui.comboBox_case.setCurrentIndex(0)\n self.ui.comboBox_file.setCurrentIndex(0)\n owner, subtitle = self.owner_and_subtitle_helper()\n\n # Get all the coded data\n data = []\n x_labels = []\n cur = self.app.conn.cursor()\n if heatmap_type == \"File\":\n if not self.attribute_file_ids:\n sql = \"select id, name from source order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n subtitle += attr_msg\n sql = \"select id, name from source where id \" + file_ids_txt + \" order by name\"\n cur.execute(sql)\n files = cur.fetchall()\n if len(files) > 40:\n files = files[:40]\n Message(self.app, _(\"Too many files\"), _(\"Too many files for display. Restricted to 40\")).exec()\n for f in files:\n x_labels.append(f[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for f in files:\n code_counts.append(self.heatmap_counter_by_file_and_code(owner, f[0], code_['cid']))\n data.append(code_counts)\n if heatmap_type == \"Case\":\n if not self.attribute_case_ids_and_names: # self.attribute_file_ids:\n sql = \"select caseid, name from cases order by name\"\n cur.execute(sql)\n cases = cur.fetchall()\n if len(cases) > 40:\n cases = cases[:40]\n Message(self.app, _(\"Too many cases\"), _(\"Too many cases for display. Restricted to 40\")).exec()\n for c in cases:\n x_labels.append(c[1])\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in cases:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n else:\n attr_msg, file_ids_txt = self.get_file_ids()\n print(self.attribute_case_ids_and_names)\n for c in self.attribute_case_ids_and_names:\n x_labels.append(c[1])\n subtitle += attr_msg\n # Calculate the frequency of each code in each file\n # Each row is a code, each column is a file\n for code_ in codes:\n code_counts = []\n for c in self.attribute_case_ids_and_names:\n cur.execute(\"SELECT fid FROM case_text where caseid=?\", [c[0]])\n fids = cur.fetchall()\n # TODO revise fids if file parameters selected\n case_counts = 0\n for fid in fids:\n case_counts += self.heatmap_counter_by_file_and_code(owner, fid[0], code_['cid'])\n code_counts.append(case_counts)\n data.append(code_counts)\n # Create the plot\n fig = px.imshow(data,\n labels=dict(x=heatmap_type, y=\"Codes\", color=\"Count\"),\n x=x_labels,\n y=y_labels,\n title=title+subtitle\n )\n fig.update_xaxes(side=\"top\")\n fig.show()\n self.helper_export_html(fig)\n self.ui.comboBox_heatmap.blockSignals(True)\n self.ui.comboBox_heatmap.setCurrentIndex(0)\n self.ui.comboBox_heatmap.blockSignals(False)", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx 
+= 1\n return image", "def recreate_image(codebook, labels, w, h):\r\n d = codebook.shape[1]\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[int(labels[label_idx])]\n label_idx += 1\n return image", "def construct_new_ref_map(labels: np.ndarray, samples: list, ref_map_shape: list):\n new_ref_map = np.zeros(shape=ref_map_shape) + BG_CLASS\n for i, indexes in enumerate(samples):\n new_ref_map[indexes[ROW_AXIS], indexes[COLUMNS_AXIS]] = labels[i]\n return new_ref_map.astype(int)", "def add_heat(img, bbox_list):\n heatmap = np.zeros_like(img[:,:,0]).astype(np.float)\n for box in bbox_list:\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][0]:box[0][1], box[1][0]:box[1][1]] += 1\n #heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n return heatmap", "def triangleplot(samples,labels):\n \n f,axarr = pl.subplots(samples.shape[1],samples.shape[1],figsize=(3*samples.shape[1],3*samples.shape[1]))\n \n for row in range(0,samples.shape[1]):\n for col in range(0,samples.shape[1]):\n # been burned too many times by unintentionally altering arrays\n x,y = deepcopy(samples[:,col]), deepcopy(samples[:,row])\n \n # Shield ourselves against nans or infinities.\n x = x[np.isfinite(x) & np.isfinite(y)]\n y = y[np.isfinite(x) & np.isfinite(y)]\n \n # do some unit conversions for the sake of our collective sanity\n if 'lambda' in labels[col]: x*=1e6 # convert a wavelength to um from m\n if 'lambda' in labels[row]: y*=1e6\n if 'L$_\\\\odot$' in labels[col]: x /= 1e12 # divide down luminosity\n if 'L$_\\\\odot$' in labels[row]: y /= 1e12\n \n # figure out some sensible axis limits\n xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2\n ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2\n xmin,xmax = np.median(x)-6*xstd, np.median(x)+6*xstd\n ymin,ymax = np.median(y)-6*ystd, np.median(y)+6*ystd\n \n \n if row>col:\n try: marginalize_2d(x,y,axarr[row,col],\\\n extent=[xmin,xmax,ymin,ymax],bins=max(np.floor(x.size/1000),50))\n except ValueError:\n print(labels[row],labels[col])\n raise ValueError(\"One of the columns has no dynamic range\")\n if col>0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)\n else: axarr[row,col].set_ylabel(labels[row],fontsize='x-large')\n if row<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)\n else: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')\n axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))\n axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))\n elif row==col:\n marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],bins=max(np.floor(x.size/1000),50))\n if row==axarr.shape[0]-1: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')\n if col<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)\n axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))\n \n else:\n axarr[row,col].set_axis_off()\n \n return f, axarr", "def create_data_matrices(self, labels, image_path_lst, colormap):\n try:\n data_matrix = list()\n label_matrix = list()\n for index, image in enumerate(image_path_lst):\n # path = os.path.join(data_path, category)\n\n try:\n imag = self.wrangler.read_image(image_path=image, color_space=colormap)\n\n data_matrix.append(imag)\n 
label_matrix.append(labels[index])\n except Exception as e:\n print(e)\n\n combined = list(zip(data_matrix, label_matrix))\n shuffle(combined)\n data_matrix[:], label_matrix[:] = zip(*combined)\n\n return data_matrix, label_matrix\n except Exception as e:\n self.logger.exception(e)\n sys.exit(1)", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def preprocess_labels(label, number_slices):\n labels = [[] for i in range(np.array(label).shape[0])]\n\n for j in range(np.array(label).shape[0]):\n if type(label) is not np.ndarray:\n for i in range(number_slices):\n labels[j].append(np.array(Image.open(label[0][i]), dtype=np.uint8))\n\n label = np.array(labels[0])\n label = label.transpose((1, 2, 0))\n max_mask = np.max(label) * 0.5\n label = np.greater(label, max_mask)\n label = np.expand_dims(label, axis=0)\n\n return label", "def heatmap(df, imshow=True, zlabel='z', ax=None, cax=None, cbarkwargs=dict(), cbarlabelkwargs=dict(), **kwargs):\n if ax is None:\n ax = plt.gcf().add_subplot(111)\n if imshow:\n im = ax.imshow(df, extent=(min(df.columns), max(df.columns), min(df.index), max(df.index)), aspect='auto', **kwargs)\n else:\n X, Y = np.meshgrid(df.columns, df.index)\n # automatic axis scaling does not work if there are nans\n defaultkwargs = dict(vmin=np.nanmin(df), vmax=np.nanmax(df))\n defaultkwargs.update(kwargs)\n im = ax.pcolormesh(X, Y, df, **defaultkwargs)\n #FIXME: Once color matplotlib colormesh is fixed (PR submitted) the following line should suffice\n #im = ax.pcolormesh(X, Y, df, **kwargs)\n if cax is None:\n cbar = plt.gcf().colorbar(im, ax=ax, **cbarkwargs)\n else:\n cbar = plt.gcf().colorbar(im, cax=cax, **cbarkwargs)\n if zlabel is not None:\n cbar.set_label(zlabel, **cbarlabelkwargs)\n # workaround for pdf/svg export for more smoothness\n # see matplotlib colorbar documentation\n cbar.solids.set_edgecolor(\"face\")\n # lower limit\n ax.set_xlim(min(df.columns), max(df.columns))\n ax.set_ylim(min(df.index), max(df.index))\n ax.set_xlabel(df.columns.name)\n ax.set_ylabel(df.index.name)\n return im, cbar", "def heatmap(args):\n p = OptionParser(heatmap.__doc__)\n p.add_option(\"--stacks\",\n default=\"Exons,Introns,DNA_transposons,Retrotransposons\",\n help=\"Features to plot in stackplot [default: %default]\")\n p.add_option(\"--heatmaps\",\n default=\"Copia,Gypsy,hAT,Helitron,Introns,Exons\",\n help=\"Features to plot in heatmaps [default: %default]\")\n p.add_option(\"--meres\", default=None,\n help=\"Extra centromere / telomere features [default: %default]\")\n add_window_options(p)\n opts, args, iopts = p.set_image_options(args, figsize=\"8x5\")\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, chr = args\n window, shift, subtract = check_window_options(opts)\n\n stacks = opts.stacks.split(\",\")\n heatmaps = opts.heatmaps.split(\",\")\n stackbeds = get_beds(stacks)\n heatmapbeds = get_beds(heatmaps)\n stackbins = get_binfiles(stackbeds, fastafile, shift, subtract=subtract)\n heatmapbins = get_binfiles(heatmapbeds, fastafile, shift, subtract=subtract)\n\n margin = .06\n inner = .015\n clen = Sizes(fastafile).mapping[chr]\n\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n # Gauge\n ratio = draw_gauge(root, margin, clen, rightmargin=4 * margin)\n yinterval = .3\n xx = margin\n yy = 1 - margin\n yy -= yinterval\n xlen = clen / ratio\n cc = 
chr\n if \"_\" in chr:\n ca, cb = chr.split(\"_\")\n cc = ca[0].upper() + cb\n\n root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))\n ax = fig.add_axes([xx, yy, xlen, yinterval - inner])\n\n nbins = get_nbins(clen, shift)\n\n owindow = clen / 100\n if owindow > window:\n window = owindow / shift * shift\n\n stackplot(ax, stackbins, nbins, palette, chr, window, shift)\n ax.text(.1, .9, cc, va=\"top\", zorder=100, transform=ax.transAxes,\n bbox=dict(boxstyle=\"round\", fc=\"w\", alpha=.5))\n\n # Legends\n xx += xlen + .01\n yspace = (yinterval - inner) / (len(stackbins) + 1)\n yy = 1 - margin - yinterval\n for s, p in zip(stacks, palette):\n s = s.replace(\"_\", \" \")\n s = Registration.get(s, s)\n\n yy += yspace\n root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))\n root.text(xx + 1.5 * inner, yy, s, size=10)\n\n yh = .05 # Heatmap height\n # Heatmaps\n xx = margin\n yy = 1 - margin - yinterval - inner\n for s, p in zip(heatmaps, heatmapbins):\n s = s.replace(\"_\", \" \")\n s = Registration.get(s, s)\n\n yy -= yh\n m = stackarray(p, chr, window, shift)\n\n Y = np.array([m, m])\n root.imshow(Y, extent=(xx, xx + xlen, yy, yy + yh - inner),\n interpolation=\"nearest\", aspect=\"auto\")\n root.text(xx + xlen + .01, yy, s, size=10)\n\n yy -= yh\n\n meres = opts.meres\n if meres:\n bed = Bed(meres)\n for b in bed:\n if b.seqid != chr:\n continue\n pos = (b.start + b.end) / 2\n cpos = pos / ratio\n xx = margin + cpos\n accn = b.accn.capitalize()\n root.add_patch(CirclePolygon((xx, yy), radius=.01, fc=\"m\", ec=\"m\"))\n root.text(xx + .014, yy, accn, va=\"center\", color=\"m\")\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n image_name = chr + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def from_labels_map(cls, labels_map):\n mask_index = cls()\n for index, value in iteritems(labels_map):\n mask_index[index] = CategoricalAttribute(\"label\", value)\n\n return mask_index", "def heatmap_chart(df, title=\"\"):\r\n source = df.copy()\r\n source = source.reset_index()\r\n source = pd.melt(source, id_vars=\"index\", value_vars=df.columns)\r\n source.columns = [\"m1\", \"m2\", \"value\"]\r\n\r\n base = alt.Chart(source).encode(\r\n alt.X('m1:O', title=\"New Model\"),\r\n alt.Y(\"m2:O\", title=\"Baseline Model\"),\r\n ).properties(\r\n width=500,\r\n height=400,\r\n title=title,\r\n )\r\n rects = base.mark_rect().encode(\r\n color='value:Q',\r\n )\r\n text = base.mark_text(\r\n align='center',\r\n baseline='middle',\r\n color='black',\r\n size=12,\r\n dx=0,\r\n ).encode(\r\n text='value:Q',\r\n )\r\n return rects + text", "def plot_heatmaps_raw(data, threshold = None, precision = 1, cmap='magma'):\n \n plt.matshow(data, cmap=cmap, fignum = 0, aspect = \"auto\")\n\n if not threshold:\n threshold = ((np.max(data) + np.min(data))/2).mean()\n \n for (i, j), z in np.ndenumerate(data):\n if z < threshold:\n color = 'w'\n else:\n color = 'k'\n\n plt.text(j, i, f\"{z:0.{precision}f}\", ha = \"center\", va = \"center\", color = color)", "def heatmap2d(self, x, y, cs, labels, ptype, pval, save_path=None,\n show=False, draw=False, fixed=None, colorsMap='jet'):\n # cs = np.flipud(cs)\n cm = plt.get_cmap(colorsMap)\n if np.iscomplexobj(cs):\n self.log.warning('Plotting only real part of %s in heatmap',\n labels[2])\n cs = cs.real\n if fixed:\n if 'dielectric_profile' in save_path:\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(0), vmax=np.amax(16))\n else:\n pass\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(cs), 
vmax=np.amax(cs))\n # cNorm = matplotlib.colors.Normalize(\n # vmin=np.amin(0), vmax=np.amax(2.5))\n else:\n cNorm = matplotlib.colors.Normalize(\n vmin=np.amin(cs), vmax=np.amax(cs))\n # cNorm = matplotlib.colors.LogNorm(vmin=np.amin(cs)+.001, vmax=np.amax(cs))\n # cNorm = matplotlib.colors.LogNorm(vmin=1e13, vmax=np.amax(cs))\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n # ax.imshow(cs,cmap=cm,norm=cNorm,extent=[x.min(),x.max(),y.min(),y.max()],aspect='auto')\n ax.invert_yaxis()\n ax.pcolormesh(x, y, cs, cmap=cm, norm=cNorm)\n # extent=[x.min(),x.max(),y.min(),y.max()],aspect='auto')\n ax.grid(False)\n scalarMap.set_array(cs)\n # div = make_axes_locatable(ax)\n # zoom_ax = div.append_axes(\"right\",size='100%', pad=.5)\n # zoom_ax.imshow(cs[75:100,:], extent=[x.min(), x.max(), .8, 1.4])\n # zoom_ax.grid(False)\n # cax = div.append_axes(\"right\",size=\"100%\",pad=.05)\n cb = fig.colorbar(scalarMap)\n cb.set_label(labels[2])\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n if draw:\n self.log.info('Beginning geometry drawing routines ...')\n ax = self.draw_geometry_2d(ptype, pval, ax)\n if save_path:\n fig.savefig(save_path, bbox_inches='tight')\n if show:\n plt.show()\n plt.close(fig)", "def construct_by_1d_array(cls, array, label_mat_shape, order='F'):\n assert len(label_mat_shape) == 2\n row, col = np.unravel_index(array, dims=label_mat_shape, order=order)\n return cls(data=[(row[i], col[i]) for i in range(len(row))], label_size=label_mat_shape[1])", "def _plot_some(arr, title_list=None, pmin=0, pmax=100, cmap='magma', **imshow_kwargs):\n import matplotlib.pyplot as plt\n imshow_kwargs['cmap'] = cmap\n\n def make_acceptable(a):\n return np.asarray(a)\n def color_image(a):\n return np.stack(map(to_color,a)) if 1 < a.shape[-1] <= 3 else a\n def max_project(a):\n ndim_allowed = 2 + int(1 <= a.shape[-1] <= 3)\n proj_axis = tuple(range(1, 1 + max(0, a[0].ndim - ndim_allowed)))\n return np.max(a, axis=proj_axis)\n\n arr = map(make_acceptable,arr)\n arr = map(color_image,arr)\n arr = map(max_project,arr)\n arr = list(arr)\n\n h = len(arr)\n w = len(arr[0])\n plt.gcf()\n for i in range(h):\n for j in range(w):\n plt.subplot(h, w, i * w + j + 1)\n try:\n plt.title(title_list[i][j], fontsize=8)\n except:\n pass\n img = arr[i][j]\n if pmin!=0 or pmax!=100:\n img = normalize(img,pmin=pmin,pmax=pmax,clip=True)\n plt.imshow(np.squeeze(img),**imshow_kwargs)\n plt.axis(\"off\")", "def colorize_label_map(label):\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label. 
Got {}'.format(label.shape))\n\n colormap = colormap_ade20k\n label_mod = np.mod(label, len(colormap))\n return colormap[label_mod].astype(np.uint8)", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def encode_labelmap(colour_img, colourlabelmap):\n colour_img = colour_img.astype(int)\n labels = np.zeros((colour_img.shape[0], colour_img.shape[1]), dtype=np.int16)\n for label_id, colour in enumerate(colourlabelmap):\n labels[np.where(np.all(colour == colour_img, axis=-1))] = label_id\n\n return labels", "def _plot_results(results, labels, score):\n \n labels_dict = {k: i for i, k in enumerate(labels)}\n rmin, rmax = min(results)[0], max(results)[0]\n to_color = lambda x : 0.8 * (x - rmax) / (rmin - rmax) + 0.5\n\n result_matrix = np.zeros((8, len(labels_dict)))\n labels_dict = {k: i for i, k in enumerate(labels)}\n for i, (v, r_labels) in enumerate(results):\n for k in r_labels:\n result_matrix[i, labels_dict[k]] = to_color(v)\n \n plt.figure(figsize=(11, 6))\n plt.imshow(result_matrix, cmap='Greys', aspect='auto')\n plt.xticks(np.arange(0, len(labels)), labels, rotation=90)\n plt.yticks(np.arange(0, len(results)), [round(x[0]) for x in results])\n plt.ylabel(score)\n plt.show()", "def create_label_image(prediction: np.ndarray, color_palette: OrderedDict):\n\n label_image = np.zeros(\n (prediction.shape[0], prediction.shape[1], 3), dtype=np.uint8\n )\n for idx, color in enumerate(color_palette):\n label_image[prediction == idx] = color\n return label_image", "def plot_heatmap(mi):\n\tfig = plt.figure()\n\tdata = np.array(mi)\n\tfig, ax = plt.subplots()\n\theatmap = ax.pcolor(data, cmap=plt.cm.jet)\n\n\tax.tick_params(direction='out')\n\n\tmajorLocator = MultipleLocator(20)\n\tmajorFormatter = FormatStrFormatter('%d')\n\tminorLocator = MultipleLocator(1)\n\n\tax.xaxis.set_major_locator(majorLocator)\n\tax.xaxis.set_major_formatter(majorFormatter)\n\tax.xaxis.set_minor_locator(minorLocator)\n\n\tax.yaxis.set_major_locator(majorLocator)\n\tax.yaxis.set_major_formatter(majorFormatter)\n\tax.yaxis.set_minor_locator(minorLocator)\n\n\tax.invert_yaxis()\n\tax.xaxis.tick_top()\n\n\t###check which seq belongs to each axe\n\tax.set_xlabel('Seq 2')\n\tax.set_ylabel('Seq 1')\n\n\tax.set_xlim(0, len(mi[1]))\n\tax.set_ylim(len(mi), 0)\n\n\tplt.xticks(rotation=90)\n\n\tcb = plt.colorbar(heatmap)\n\tcb.set_label('MI value')\n\n\t#pdf = PdfPages('heatmap.pdf')\n\t#pdf.savefig(fig)\n\tfig.savefig('heatmap.png')\n\t#pdf.close()", "def plotHeatmap(inputRunMatrix, tick_label, output_folder):\n\t# heatmap of run sim matrix\n\tinputRunMatrix = np.sqrt(inputRunMatrix)\n\tvmax = np.percentile(inputRunMatrix,95)\n\tvmin = np.amin(inputRunMatrix)\n\t\n\tfig,ax = plt.subplots()\n\tax = sns.heatmap(inputRunMatrix,vmin=vmin,vmax=vmax, \\\n xticklabels=tick_label,yticklabels=tick_label)\n\n\t# square the color bar tick label to undo sqrt of sim matrix\n\tc_bar = ax.collections[0].colorbar\n\tticLoc = c_bar.get_ticks()\n\tnewTic = [int(x*x) for x in ticLoc]\n\tc_bar.set_ticks(ticLoc)\n\tc_bar.set_ticklabels(newTic)\n\n\tplt.tight_layout()\n\tfig.savefig(output_folder + \"/heatmap.png\")\n\tplt.close(fig)", "def map_measurements_on_labels(labels_layer:\"napari.layers.Labels\", column:str = \"label\", viewer:\"napari.Viewer\" = None) -> \"napari.types.ImageData\":\n import pandas as pd\n import dask.array as da\n from dask import delayed\n from functools import partial\n\n labels = 
labels_layer.data\n table = pd.DataFrame(labels_layer.properties)\n\n # special treatment for time series\n if len(labels.shape) == 4:\n # determine how the Frame column is called; in case there is any\n frame_column = None\n for potential_frame_column in ['frame', 'Frame']:\n if potential_frame_column in table.keys():\n frame_column = potential_frame_column\n break\n\n # Relabel one timepoint\n output_sample = relabel_timepoint_with_map_array(labels, table, column, frame_column, 0)\n\n lazy_arrays = []\n for i in range(labels.shape[0]):\n # build a delayed function call for each timepoint\n lazy_processed_image = delayed(\n partial(relabel_timepoint_with_map_array, labels, table, column, frame_column, i)\n )\n lazy_arrays.append(\n lazy_processed_image()\n )\n\n # build an array of delayed arrays\n dask_arrays = [\n [da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype)]\n if len(output_sample.shape) == 2\n else da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype\n )\n for delayed_reader in lazy_arrays\n ]\n # Stack into one large dask.array\n stack = da.stack(\n dask_arrays,\n axis=0)\n return stack\n else:\n label_list = np.asarray(table['label']).tolist()\n measurement_list = np.asarray(table[column]).tolist()\n\n return relabel_with_map_array(labels, label_list, measurement_list)", "def plot_cm_dict(labels, preds, label_mapper, cmap='magma'):\n plot_heatmaps_raw(confusion_matrix(labels, preds), precision=0, cmap=cmap)\n plt.colorbar()\n plt.xticks(range(len(label_mapper)), label_mapper.values(), rotation=90)\n plt.yticks(range(len(label_mapper)), label_mapper.values(), rotation=0)\n plt.xlabel(\"Predicted Label\")\n plt.ylabel(\"True Label\")", "def plot_2D(a, b, data, labels, centers=None):\n if centers is not None:\n plt.scatter(centers[:, a], centers[:, b])\n plt.scatter(data[:, a], data[:, b], s=50, c=labels)\n plt.show()", "def plot_tiles(tiles, cmap=None, \n layout='llc', rotate_to_latlon=False,\n Arctic_cap_tile_location = 2,\n show_colorbar=False, \n show_cbar_label=False, \n show_tile_labels= True,\n cbar_label = '', \n fig_size = 9, \n less_output=True,\n **kwargs):\n\n # processing for dask array (?)\n if isinstance(tiles, dask.array.core.Array):\n tiles = np.asarray(tiles.squeeze())\n\n # get default colormap\n cmap, (cmin,cmax) = assign_colormap(tiles,cmap)\n\n #%%\n fig_num = -1\n for key in kwargs:\n if key == \"cmin\":\n cmin = kwargs[key]\n elif key == \"cmax\":\n cmax = kwargs[key]\n elif key == 'fig_num':\n fig_num = kwargs[key]\n else:\n print(\"unrecognized argument \", key)\n\n # if llc90, return array otherwise not implemented\n get_array = True\n nx = tiles.shape[-1]\n if nx != 90:\n get_array = False\n warnings.warn('Will not return array for non llc90 data')\n\n # set sizing for subplots\n fac1 = 1; fac2=1\n if show_tile_labels and show_colorbar:\n fac2 = 1.15\n\n if show_tile_labels==False:\n if show_colorbar:\n fac2 = 0.8766666666666666\n else:\n fac2 = 9.06/9\n \n if layout == 'llc' :\n nrows=5\n ncols=5\n\n # plotting of the tiles happens in a 5x5 grid\n # which tile to plot for any one of the 25 spots is indicated with a list\n # a value of negative one means do not plot anything in that spot.\n tile_order = np.array([-1, -1, 10, 11, 12, \\\n -1, 6, 7, 8, 9, \\\n 2, 5, -1, -1, -1, \\\n 1, 4, -1, -1, -1, \\\n 0, 3, -1, -1, -1])\n\n elif layout == 'latlon':\n ncols = 4\n nrows = 4\n\n # plotting of the tiles happens in a 4x4 grid\n # which tile to plot for any one of the 16 
spots is indicated with a list\n # a value of negative one means do not plot anything in that spot.\n # the top row will have the Arctic tile. You can choose where the \n # Arctic tile goes. By default it goes in the second column.\n if Arctic_cap_tile_location not in [2,5,7,10]:\n print('Arctic Cap Alignment is not one of 2,5,7,10, using 2')\n Arctic_cap_tile_location = 2 \n \n if Arctic_cap_tile_location == 2: # plot in 1st position, column 1\n tile_order_top_row = [6, -1, -1, -1]\n elif Arctic_cap_tile_location == 5:\n tile_order_top_row = [-1, 6, -1, -1]\n elif Arctic_cap_tile_location == 7:# plot in 3rd position, column 3\n tile_order_top_row = [-1, -1, 6, -1]\n elif Arctic_cap_tile_location == 10:# plot in 4th position, column 4\n tile_order_top_row = [-1, -1, -1, 6]\n \n # the order of the rest of the tile is fixed. four columns each with \n # three rows.\n tile_order_bottom_rows =[2, 5, 7, 10, \\\n 1, 4, 8, 11, \\\n 0, 3, 9, 12]\n \n # these are lists so to combine tile_orde_first and tile_order_rest \n # you just add them in python (wierd). If these were numpy arrays \n # one would use np.concatenate()\n tile_order = tile_order_top_row + tile_order_bottom_rows\n\n # create fig object\n if fig_num > 0:\n f, axarr = plt.subplots(nrows, ncols, num=fig_num)\n else:\n f, axarr = plt.subplots(nrows, ncols)\n\n #%%\n f.set_size_inches(fac1*fig_size, fig_size*fac2)\n\n if show_tile_labels==False:\n f.subplots_adjust(wspace=0, hspace=0)\n \n # loop through the axes array and plot tiles where tile_order != -1\n cur_arr = np.zeros((nrows*nx,ncols*nx)) if get_array else None\n cur_tile = -1\n for i, ax in enumerate(axarr.ravel()):\n ax.axis('off')\n\n cur_tile_num = tile_order[i]\n have_tile = False\n\n if cur_tile_num >= 0:\n if type(tiles) == np.ndarray:\n have_tile = True\n cur_tile = tiles[cur_tile_num ]\n \n elif isinstance(tiles, dask.array.core.Array) or \\\n isinstance(tiles, xr.core.dataarray.DataArray):\n\n if cur_tile_num in tiles.tile :\n have_tile = True\n cur_tile = tiles.sel(tile=cur_tile_num)\n \n if have_tile:\n if (layout == 'latlon' and rotate_to_latlon and cur_tile_num == 6):\n if Arctic_cap_tile_location == 2:\n cur_tile = np.rot90(cur_tile,-1)\n elif Arctic_cap_tile_location == 7:\n cur_tile = np.rot90(cur_tile,-3)\n elif Arctic_cap_tile_location == 10:\n cur_tile = np.rot90(cur_tile,2)\n\n if (layout == 'latlon' and rotate_to_latlon and \n cur_tile_num > 6):\n \n cur_tile = np.rot90(cur_tile)\n \n im=ax.imshow(cur_tile, vmin=cmin, vmax=cmax, cmap=cmap, \n origin='lower')\n\n # axis handling\n ax.set_aspect('equal')\n ax.axis('on')\n if show_tile_labels:\n ax.set_title('Tile ' + str(cur_tile_num))\n \n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # Generate array from this process\n colnum = ncols-1-int(i/ncols)\n rownum = i%nrows\n rownump1 = int(rownum + 1)\n colnump1 = int(colnum + 1)\n if not less_output:\n print('i=',i,rownum, colnum)\n \n if cur_tile_num>=0 and get_array:\n cur_arr[colnum*nx:colnump1*nx, rownum*nx:rownump1*nx] = cur_tile\n\n # show the colorbar\n if show_colorbar:\n if show_tile_labels:\n f.subplots_adjust(left=None, bottom=None, right=0.8)\n else:\n f.subplots_adjust(right=0.8, left=None, bottom=None,\n top=None, wspace=0, hspace=0)\n \n #[left, bottom, width, height]\n h=.6;w=.025\n cbar_ax = f.add_axes([0.85, (1-h)/2, w, h])\n cbar = f.colorbar(im, cax=cbar_ax)#, format='%.0e') \n if show_cbar_label:\n cbar.set_label(cbar_label)\n\n return f, cur_arr\n 
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def plot_confusion_matrix(conf_matrix, labels=[],\n title='调制识别混淆矩阵',\n cmap=cm.Blues, name=None):\n plt.figure(figsize=[7, 6], dpi=160)\n plt.imshow(conf_matrix, interpolation='nearest', cmap=cmap, origin='upper')\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n if name is None:\n plt.show()\n else:\n plt.savefig(name)", "def make_translations(dataset, labels):\n offset = 10\n translations = [\n (0, offset),\n (0, -offset),\n (offset, 0),\n (-offset, 0),\n (-offset, -offset),\n (-offset, offset),\n (offset, -offset),\n (offset, offset)\n ]\n\n was_flattened = (len(dataset[0].shape) == 1)\n augmented_dataset = []\n augmented_labels = []\n \n for image, label in zip(dataset, labels):\n if was_flattened:\n image = unflatten(image)\n \n height = image.shape[0]\n width = image.shape[1]\n \n for t_x, t_y in translations:\n new_image = np.zeros(image.shape)\n t_mat = np.array([[1,0,t_x],[0,1,t_y],[0,0,1]])\n\n for x in range(0, width):\n for y in range(0, height):\n old_coords = np.array([[x],[y],[1]])\n new_coords = t_mat.dot(old_coords) # translation here\n\n if new_coords[0] > 0 and new_coords[0] < width and new_coords[1] > 0 and new_coords[1] < height:\n new_image[new_coords[1], new_coords[0]] = image[y, x]\n \n if was_flattened:\n new_image.flatten()\n augmented_dataset.append(new_image)\n augmented_labels.append(label)\n\n return (augmented_dataset, augmented_labels)", "def plotHeat2D(x, y, z, lvls=25, title=\"Title\", xlab=\"x-axis\", ylab=\"y-axis\", zlab=\"z-axis\"):\n plt.contourf(x, y, z, cmap=plt.cm.inferno, norm=LogNorm(), levels=lvls)\n plt.title(title)\n plt.xlabel(xlab)\n plt.ylabel(ylab)", "def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')", "def plot_lut(df, output):\n flatui = [\"#3d77d4\", \"#f0b05d\"]\n fig, ax = plt.subplots(figsize=(18,10))\n p = sns.heatmap(df, linewidths=0.1, annot=False, cbar=True, \n ax=ax, cmap=sns.color_palette(flatui), \n cbar_kws={'orientation': 'vertical',\n 'label': 'class'})\n\n colorbar = p.collections[0].colorbar\n colorbar.set_ticks([0.25, 0.75])\n colorbar.set_ticklabels(['0', '1'])\n\n plt.title('2D Look-Up Table')\n plt.xlabel('binned cluster width')\n plt.ylabel('binned tau')\n plt.savefig(output)", "def embed_labels(input_image, labels, nrow = 1):\n\tnew_width = input_image.width + 100\n\tnew_size = (new_width, input_image.height)\n\tnew_img = Image.new(\"RGB\", new_size, color='white')\n\tnew_img.paste(input_image, (0, 0))\n\tdraw = ImageDraw.Draw(new_img)\n\n\tfor i, s in enumerate(labels):\n\t\tx = float(i%nrow) * (input_image.width/float(nrow)) + (input_image.width/float(nrow)) * 1./4.\n\t\ty = int(i/nrow) * input_image.height/(len(labels)/nrow) + \\\n\t\t\tinput_image.height/(len(labels)/nrow) * 4./6.\n\t\tdraw.text(xy=(x, y), text=s, fill=(255, 255, 255))\n\n\treturn new_img", "def add_heat(heatmap, bbox_list):\n for box in bbox_list:\n\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n return heatmap", "def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict", "def create_heat_map(self, ax=None, 
block=True):\n plt.figure()\n ax = sns.heatmap(data=self.data, fmt=\"\", cmap='RdYlGn', linewidths=0.3, ax=ax)\n ax.invert_yaxis()\n ax.set(xlabel='Books index', ylabel='Books values over iterations', title='Heat map for the prediction result'\n ' of each book over iterations')\n\n # plt.show(block=block)" ]
[ "0.6765666", "0.6746936", "0.6712045", "0.6677427", "0.6677427", "0.6677427", "0.66198003", "0.66069126", "0.65382963", "0.6534117", "0.6513789", "0.6497601", "0.64536047", "0.6449943", "0.6434751", "0.63585454", "0.63225764", "0.62491506", "0.6228405", "0.6213863", "0.6179809", "0.6174125", "0.61117685", "0.61051387", "0.60818577", "0.6034454", "0.6024062", "0.6014109", "0.59785247", "0.59736055", "0.5930861", "0.59208125", "0.5883009", "0.58748734", "0.5868645", "0.5821777", "0.58173865", "0.5802142", "0.5801717", "0.57964736", "0.5792513", "0.5792513", "0.57658416", "0.57365674", "0.57302284", "0.572925", "0.5707312", "0.5706456", "0.5699972", "0.5654594", "0.563404", "0.5623211", "0.55932313", "0.55885094", "0.5577968", "0.5548511", "0.5548474", "0.55434155", "0.5517417", "0.5511124", "0.5498328", "0.5493266", "0.54620194", "0.5461728", "0.5431244", "0.5406417", "0.5406096", "0.5400325", "0.539777", "0.5396878", "0.5373704", "0.53651166", "0.5363782", "0.5359547", "0.5356194", "0.53544843", "0.53327006", "0.5328629", "0.5309284", "0.5303391", "0.5299678", "0.52941245", "0.5276224", "0.5275232", "0.52717775", "0.527013", "0.52522266", "0.52514917", "0.52514017", "0.52496266", "0.5229683", "0.52209574", "0.52180415", "0.52156323", "0.52084005", "0.52053297", "0.51899874", "0.5189035", "0.5185543", "0.5173207" ]
0.6688963
3
A method to open and parse UniProt according to their keywords. Similar to what was done in Ata et al. 2018.
def parse_records(self):
        for record in sp.parse(gzip.open(
                "./human_uniprot_04_07_20.gz", 'rt')):
            # print(record.taxonomy_id)
            # if record.organism != "Homo sapiens":
            #     continue
            # print(record.features[0])
            # for comment in record.comments:
            #     if comment.startswith("SUBCELLULAR LOCATION"):
            #         print(comment)
            self.extract_features_to_dict(record)
            self.extract_localization(record)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uniprot_txt_parser(uniprot_txt_lines):\n uniprot = {}\n entry_line = [i for i,l in enumerate(uniprot_txt_lines) if l[:2]=='ID']\n entry_line.append(len(uniprot_txt_lines))\n begin_end = [(begin,entry_line[i+1]) for i,begin in enumerate(entry_line[:-1])]\n for begin,end in begin_end:\n for line in uniprot_txt_lines[begin:end]:\n line = line.rstrip('\\r\\n')\n line = line.rstrip('.')\n line = line.replace(';',' ')\n words = line.split()\n if words[0] == 'AC':\n acc = words[1]\n uniprot[acc] = {}\n elif words[0] == 'DR' and words[1] =='InterPro':\n if uniprot[acc].has_key('interpro'):\n uniprot[acc]['interpro'].append((words[2],1))\n else:\n uniprot[acc]['interpro'] = [(words[2],1)]\n elif words[0] == 'DR' and words[1] == 'Pfam':\n if uniprot[acc].has_key('pfam'):\n uniprot[acc]['pfam'].append((words[2],int(words[-1])))\n else:\n uniprot[acc]['pfam'] = [(words[2],int(words[-1]))]\n elif words[0] == 'DR' and words[1] == 'SMART':\n if uniprot[acc].has_key('smart'):\n uniprot[acc]['smart'].append((words[2],words[-1]))\n else:\n uniprot[acc]['smart'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'SUPFAM':\n if uniprot[acc].has_key('supfam'):\n uniprot[acc]['supfam'].append((words[2],words[-1]))\n else:\n uniprot[acc]['supfam'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'PROSITE':\n if uniprot[acc].has_key('prosite'):\n uniprot[acc]['prosite'].append((words[2],words[-1]))\n else:\n uniprot[acc]['prosite'] = [(words[2],words[-1])]\n # elif words[0] == 'DR' and words[1] =='PDB':\n # w = words[-1].replace('/',' ')\n # w = w.replace('=',' ')\n # w = w.replace('-',' ')\n # w = w.split()\n # w = words[2:-1]+w\n\n # if uniprot[acc].has_key('pdb'):\n # uniprot[acc]['pdb'].append(w)\n # else:\n # uniprot[acc]['pdb'] = [w]\n\n return uniprot", "def keyword_extraction(file_content):\n\n # [question, question....]\n for key, value in file_content.items():\n seg, hidden = ltp.seg([key])\n # ner: [[('Nh', 2, 2)]]\n ner = ltp.ner(hidden)\n # keywords: [('PERSON', \"吴轩\")], tuple_item: ('Nh', 2, 2)\n keywords = [(tag_to_name[tuple_item[0]], to_string(seg[0][tuple_item[1]: tuple_item[2]+1])) for tuple_item in ner[0]]\n file_content[key].keywords = keywords\n\n return file_content", "def open_uniprotsite(prot_names):\n fasta_dict = {}\n for prot_id in prot_names:\n \n uniprot_link = \"https://www.uniprot.org/uniprot/\" + prot_id + \".fasta\"\n\n uniprot_fasta = urllib.request.urlopen(uniprot_link)\n fasta_sequence = uniprot_fasta.readlines()#.decode('utf-8')\n fasta_sequence = fasta_sequence[1:]\n fasta_sequence = list(f.decode('utf-8') for f in fasta_sequence)\n fasta_sequence = ''.join(fasta_sequence)\n fasta_sequence = fasta_sequence.replace('\\n','')\n\n fasta_dict[prot_id] = fasta_sequence\n uniprot_fasta.close()\n\n return fasta_dict", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def uniprot_wd40(key='pfam',pdb=False):\n if key == 'pfam':\n query = 'database:(type:pfam id:PF00400) or database:(type:pfam id:PF12894) or database:(type:pfam id:PF16529) or database:(type:pfam id:PF16756)'\n elif key == 'smart':\n query = 'database:(type:smart id:SM00320)'\n elif key == 'supfam':\n query = 'database:(type:supfam id:SSF50978)'\n elif key == 'interpro_repeat':\n query = 'database:(type:interpro id:IPR001680)'\n elif key == 'interpro_domain':\n query = 'database:(type:interpro id:IPR017986)'\n elif key == 'uniprot_keyword':\n query = 
'keyword:\"WD repeat\"'\n elif key == 'uniprot_repeat':\n query = 'annotation:(type:repeat wd)'\n elif key == 'prosite1':\n query = 'database:(type:prosite id:PS00678)'\n elif key == 'prosite2':\n query = 'database:(type:prosite id:PS50082)'\n elif key == 'prosite3':\n query = 'database:(type:prosite id:PS50294)'\n else:\n print 'wrong query key'\n return\n\n if pdb:\n query = query + ' AND '+ 'database:(type:pdb)'\n\n url = ' http://www.uniprot.org/uniprot/?'\n data ={\n 'query':query,\n 'format':'list',\n }\n data = urllib.urlencode(data)\n req = urllib2.Request(url,data)\n response = urllib2.urlopen(req)\n r = response.readlines()\n lines = set([line.rstrip('\\r\\n') for line in r])\n\n return key,lines", "def setKeys():\n keywords['c++'] = {}\n with open('cppkeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['c++'][key] = list(words)\n for j in words:\n MyDict.insert(j)\n keywords['py'] = {}\n with open('pykeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['py'][key] = list(words)\n for j in words:\n MyDict.insert(j)", "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def process_raw_phrases(file_path):", "def uniprot_wd40(key='pfam',pdb=False):\n if key == 'pfam':\n query = 'database:(type:pfam id:PF00400)'\n elif key == 'smart':\n query = 'database:(type:smart id:SM00320)'\n elif key == 'supfam':\n query = 'database:(type:supfam id:SSF50978)'\n elif key == 'interpro_repeat':\n query = 'database:(type:interpro id:IPR001680)'\n elif key == 'interpro_domain':\n query = 'database:(type:interpro id:IPR017986)'\n elif key == 'uniprot_keyword':\n query = 'keyword:\"WD repeat\"'\n elif key == 'uniprot_repeat':\n query = 'annotation:(type:repeat wd)'\n elif key == 'prosite1':\n query = 'database:(type:prosite id:PS00678)'\n elif key == 'prosite2':\n query = 'database:(type:prosite id:PS50082)'\n elif key == 'prosite3':\n query = 'database:(type:prosite id:PS50294)'\n else:\n print 'wrong query key'\n return\n\n if pdb:\n query = query + ' AND '+ 'database:(type:pdb)'\n\n url = ' http://www.uniprot.org/uniprot/?'\n data ={\n 'query':query,\n 'format':'list',\n 'compress':'no',\n 'inclue':'no',\n }\n data = urllib.urlencode(data)\n req = urllib2.Request(url,data)\n response = urllib2.urlopen(req)\n r = response.readlines()\n lines = set([line.rstrip('\\r\\n') for line in r])\n return lines", "def test_keywords_file(self):\n\n kwd_filename = unique_filename(suffix='.keywords')\n keywords = {'impact_summary': 'Describing the layer',\n 'category': 'impact',\n 'subcategory': 'flood',\n 'layer': None,\n 'kw': 'with:colon',\n 'with spaces': 'trailing_ws ',\n ' preceding_ws': ' mixed spaces ',\n 'number': 31,\n 'a_float ': 13.42,\n 'a_tuple': (1, 4, 'a'),\n 'a_list': [2, 5, 'b'],\n 'a_dict': {'I': 'love', 'cheese': 'cake', 'number': 5},\n 
'a_nested_thing': [2, {'k': 17.8}, 'b', (1, 2)],\n 'an_expression': '37 + 5', # Evaluate to '37 + 5', not 42\n # Potentially dangerous - e.g. if calling rm\n 'dangerous': '__import__(\"os\").system(\"ls -l\")',\n 'yes': True,\n 'no': False}\n\n write_keywords(keywords, kwd_filename)\n msg = 'Keywords file %s was not created' % kwd_filename\n assert os.path.isfile(kwd_filename), msg\n\n fid = open(kwd_filename)\n for line in fid.readlines():\n fields = line.split(':')\n\n k = fields[0]\n v = ':'.join(fields[1:])\n\n msg = 'Did not find keyword \"%s\" in %s' % (k, keywords.keys())\n assert k in keywords, msg\n\n msg = 'Got value \"%s\", expected \"%s\"' % (v.strip(),\n str(keywords[k]).strip())\n assert v.strip() == str(keywords[k]).strip(), msg\n fid.close()\n\n x = read_keywords(kwd_filename)\n os.remove(kwd_filename)\n\n assert isinstance(x, dict)\n\n # Check keyword names\n for key in x:\n msg = 'Read unexpected key %s' % key\n assert key in keywords, msg\n\n for key in keywords:\n msg = 'Expected key %s was not read from %s' % (key,\n kwd_filename)\n assert key in x, msg\n\n # Check keyword values\n for key in keywords:\n refval = keywords[key] # Expected value\n newval = x[key] # Value from keywords file\n\n # Catch all - comparing string reprentations\n msg = ('Expected value \"%s\" was not read from \"%s\". '\n 'I got \"%s\"' % (refval, kwd_filename, newval))\n assert str(refval).strip() == str(newval), msg\n\n # Check None\n if refval is None:\n assert newval is None\n\n # Check Booleans - explicitly\n if refval is True:\n assert newval is True\n\n if refval is False:\n assert newval is False\n\n # Check equality of python structures\n if not isinstance(refval, basestring):\n msg = 'Expected %s but got %s' % (refval, newval)\n assert newval == refval, msg\n\n # Check catching wrong extensions\n kwd_filename = unique_filename(suffix='.xxxx')\n try:\n write_keywords(keywords, kwd_filename)\n except VerificationError:\n pass\n else:\n msg = 'Should have raised assertion error for wrong extension'\n raise Exception(msg)\n\n # Make a spatial layer with these keywords\n V = read_layer('%s/test_buildings.shp' % TESTDATA)\n V = Vector(data=V.get_data(),\n geometry=V.get_geometry(),\n projection=V.get_projection(),\n keywords=keywords)\n assert keywords['impact_summary'] == V.get_impact_summary()\n for key, val in V.get_keywords().items():\n msg = ('Expected keywords[%s] to be \"%s\" but '\n 'got \"%s\"' % (key, keywords[key], val))\n\n assert keywords[key] == val, msg\n #if key in [' preceding_ws', 'with spaces']:\n # # Accept that surrounding whitespace may be stripped\n # assert keywords[key].strip() == val, msg\n #else:\n # assert keywords[key] == val, msg", "def parse_text(filehandle: TextIO) -> Iterator[Fasta]:\n\n # Check that the file looks like UniProt text format\n first_line = next(filehandle)\n if not first_line.startswith(\"ID\"):\n raise TextParserError(\n \"Unexpected file format: first line of UniProt text file should start with 'ID'\"\n )\n filehandle.seek(0)\n\n fasta = Fasta(sequence=\"\")\n for line in filehandle:\n key = line[:2] # This is more efficient than using line.startswith\n if key == \"ID\":\n tokens = line.split()\n fasta.entry_name = tokens[1]\n fasta.reviewed = True if tokens[2] == \"Reviewed;\" else False\n elif key == \"AC\":\n if fasta.accession is None:\n accessions = line[5:].rstrip(\";\\n\").split(\"; \")\n fasta.accession = accessions[0]\n elif key == \"DT\":\n if \"sequence version\" in line:\n tokens = line[5:].strip(\".\\n\").split()\n fasta.version 
= int(tokens[3])\n elif key == \"DE\":\n if \"RecName\" in line:\n fasta.name = _extract_name(line)\n # Get the first SubName if no RecName found\n elif fasta.name is None and line[5:12] == \"SubName\":\n fasta.name = _extract_name(line)\n elif line[5:10] == \"Flags\" and \"Fragment\" in line:\n fasta.fragment = True\n elif key == \"GN\":\n if line[5:10] == \"Name=\":\n tokens = line[10:].split(\";\")\n # Remove evidence tags, if present\n gene_tokens = tokens[0].split(\" {\")\n fasta.gene = gene_tokens[0]\n elif key == \"OS\":\n # TODO: check for multiline species name (excluding brackets)\n if fasta.species is None:\n species_line = line[5:].strip().split(\" (\")\n fasta.species = species_line[0].strip(\".\")\n elif key == \"OX\":\n if \"NCBI_TaxID\" in line:\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n # Remove evidence tag if present\n taxid_tokens = tokens[0][11:].split(\" {\")\n fasta.taxid = taxid_tokens[0]\n elif key == \"PE\":\n fasta.evidence = int(line[5])\n elif key == \" \":\n sequence_line = line.strip().replace(\" \", \"\")\n fasta.sequence += sequence_line\n elif key == \"//\":\n yield fasta\n fasta = Fasta(sequence=\"\")", "def read_input(fname):\n ignore = set()\n with codecs.open(fname, 'rb', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n lemma, word, tag = line.split(\"\\t\")\n if word != u\"\":\n ignore.add((lemma, tag))\n return ignore", "def main():\n # call open_file() to get file pointer \n fd = open_file()\n # call fill completion to get dict, then close the openned file\n full_set = create_dict(fd)\n wrds = find_words(full_set)\n print(wrds)\n fd.close()\n # ask for a prefix in while loop", "def parse(self, infile):\r\n raise NotImplementedError()", "def _parse(self, infile):\n raise NotImplementedError()", "def __init__(self):\n self.grammar = defaultdict(list) # store the grammar and vocabulary", "def preprocess_corpus(train_sents):\n global lookupLexiconDict\n lookupLexiconDict = {}\n \n lexiconDir = getcwd()+'\\\\data\\\\lexicon'\n filesList = [hfile for hfile in listdir(lexiconDir) if path.isfile(lexiconDir+'\\\\'+hfile) ]\n \n decision_tags = ['facility','product','musicartist']\n fileMappingDict = \\\n {\n 'architecture.museum':'facility',\n 'automotive.make':'product',\n 'automotive.model':'product',\n 'award.award':'musicartist',\n 'base.events.festival_series':'geo-loc',\n #'bigdict':'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',\n 'book.newspaper':'company',\n 'broadcast.tv_channel':'tvshow',\n 'business.brand':'company',\n 'business.consumer_company':'company',\n 'business.consumer_product':'product',\n 'business.sponsor':'company',\n 'cap.1000':'geo-loc',\n 'cvg.computer_videogame':'product',\n 'cvg.cvg_developer':'company',\n 'cvg.cvg_platform':'product',\n 'education.university':'facility',\n 'english.stop':'O',\n 'firstname.5k':'person',\n 'government.government_agency':'company',\n 'internet.website':'company',\n 'lastname.5000':'person',\n 'location':'geo-loc',\n 'location.country':'geo-loc',\n 'lower.5000':'O',\n 'people.family_name':'person',\n 'people.person':'person',\n 'people.person.lastnames':'person', # <-----------------------------\n 'product':'product',\n 'sports.sports_league':'sportsteam',\n 'sports.sports_team':'sportsteam',\n 'time.holiday':'O',\n 'time.recurring_event':'O',\n 'transportation.road':'geo-loc',\n 'tv.tv_network':'tvshow',\n 'tv.tv_program':'tvshow',\n 'venture_capital.venture_funded_company':'company',\n 'venues':'geo-loc'\n }\n\n for lexFile in filesList:\n if lexFile not in 
fileMappingDict: continue\n print 'Processing ', lexFile\n \n with open(lexiconDir+'\\\\'+lexFile) as f:\n for line in f:\n line = line.lower().split()\n if len(line) == 1: low=0\n else:low=1\n for i in range(low,len(line)):\n key = tuple(line[:i+1])\n if key not in lookupLexiconDict:\n lookupLexiconDict[key] = [fileMappingDict[lexFile]]\n else:\n lookupLexiconDict[key].append(fileMappingDict[lexFile]) \n\n \n #pass ", "def read(fname):\n\tcmu_dict = split_cmu_dict.load_dict(TRAIN_FILEPATH_SRC)\n\tfor word, phonemes in cmu_dict.iteritems():\n\t\tyield word, phonemes.split()", "def __init__(self, infile):\n txt = infile.read()\n for block in self.splitter.split(txt):\n block = block.strip()\n if block:\n term = block.splitlines()[0].strip().decode('utf8')\n defn = \"\\n\".join(line.strip() for line in block.splitlines()[1:])\n self[term] = defn.decode('utf8')", "def parse_pkgsubmit(self):\n parser = pkgsubmitParser()\n with self.opener.open(PKGSUBMIT_URL) as f:\n parser.feed(f.read().decode())\n if parser.token:\n self.token = parser.token\n self.categories = parser.categories", "def determine_keywords(self):\n\n split = dict()\n split['email_cc'] = re.compile(\"^\\s*CC[-_]?MAIL[:=]\\s*(.*)\")\n split['email_cc2'] = re.compile(\"^\\s*C[Cc][:=]\\s*(.*)\")\n split['fixed_in'] = re.compile(\"^\\s*FIXED[-_]?IN[:=]\\s*(.*)\")\n\n numeric = dict()\n numeric['bug_fixed'] = re.compile(\"^\\s*(?:BUGS?|FEATURE)[:=]\\s*(.+)\")\n numeric['bug_cc'] = re.compile(\"^\\s*CCBUGS?[:=]\\s*(.+)\")\n\n presence = dict()\n presence['email_gui'] = re.compile(\"^\\s*GUI:\")\n presence['silent'] = re.compile(\"(?:CVS|SVN|GIT|SCM).?SILENT\")\n presence['notes'] = re.compile(\"(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')\")\n\n results = defaultdict(list)\n for line in self.commit.message.split(\"\\n\"):\n # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off\n # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them\n line = re.sub(\"^Summary: (.+)\", \"\\g<1>\", line)\n\n # Start processing our keywords...\n for (name, regex) in split.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += [result.strip() for result in match.group(1).split(\",\")]\n\n for (name, regex) in numeric.iteritems():\n match = re.match( regex, line )\n if match:\n results[name] += re.findall(\"(\\d{1,10})\", match.group(1))\n\n for (name, regex) in presence.iteritems():\n if re.match( regex, line ):\n results[name] = True\n\n self.keywords = results", "def readVLTUS(self): \n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.LTUS\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n Detectors=[] \n for i in lines:\n for j in i:\n if j == ' ': continue\n else: break\n if j=='#': continue\n items=i.split('=')\n detector={}\n detector['name']=items[0]\n Detectors.append(detector)\n #print Detectors \n #print '-----------------------------' \n return Detectors", "def _read_lexicon(a_dname):\n if not a_dname:\n return\n elif a_dname[-1] == '/':\n a_dname = os.path.dirname(a_dname)\n basename = os.path.basename(a_dname)\n if basename == HSAN:\n return _read_hsan(a_dname)\n elif basename == S140:\n return _read_s140(a_dname)\n elif basename == SUBJCL:\n return _read_subjcl(a_dname)\n 
elif basename == NRC_HSHTAG:\n return _read_nrc_hshtag(a_dname)\n else:\n raise Exception(\"Unknown dictionary format: '{:s}'\".format(basename))", "def load_uniprot(filepath):\n print('Loading uniprot dataset')\n with open(filepath) as handle:\n uniprot = [r for r in SeqIO.parse(handle, 'swiss')]\n repeated_seqs = set(seq for seq, count in Counter(u._seq._data for u in uniprot).items() if count > 1)\n return uniprot, repeated_seqs", "def _parser(self, tokens: List[str]):\n\n # TODO: Improve the CFG work for the following:\n # - Play songs faster than despicito\n # - Play something similar to despicito but faster\n # - Play something similar to u2 and justin bieber\n\n def gen_lexing_patterns(vals: List[str]):\n # TODO: Here we remove entries containing ',\n # as it is a special character used by\n # the NLTK parser. We need to fix this\n # eventually.\n safe_vals = [s for s in vals if \"\\'\" not in s]\n return \"' | '\".join(safe_vals) or \"NONE\"\n\n # A Probabilistic Context Free Grammar (PCFG)\n # can be used to simulate \"operator precedence\",\n # which removes the problems of ambiguity in\n # the grammar.\n grammar = nltk.PCFG.fromstring(\"\"\"\n Root -> Terminal_Command Result [0.6]\n Root -> Terminal_Command [0.4]\n Result -> Entity [0.5]\n Result -> Unary_Command Result [0.1]\n Result -> Result Binary_Command Result [0.4]\n Entity -> '{}' [1.0]\n Unary_Command -> '{}' [1.0]\n Terminal_Command -> '{}' [1.0]\n Binary_Command -> '{}' [1.0]\n \"\"\".format(\n gen_lexing_patterns(self.kb_named_entities),\n gen_lexing_patterns(self.keywords.get(\"unary\").keys()),\n gen_lexing_patterns(self.keywords.get(\"terminal\").keys()),\n gen_lexing_patterns(self.keywords.get(\"binary\").keys()),\n ))\n\n parser = nltk.ViterbiParser(grammar)\n # TODO: Returns the first tree, but need to deal with\n # case where grammar is ambiguous, and more than\n # one tree is returned.\n return next(parser.parse(tokens))", "def __init__(self, txt_path, in_vocab_path, out_vocab_path):\n self.txt_seqs = open(txt_path, encoding='utf8', errors='ignore').readlines()\n self.word2id = utils.load_vocab(in_vocab_path,\n extra_word_list=[\"<UNK>\", \"<END>\"])\n self.punc2id = utils.load_vocab(out_vocab_path,\n extra_word_list=[\" \"])\n self.class2punc = { k : v for (v, k) in self.punc2id.items()}", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. 
i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def get_keywords(text):\n tokens = [word.lower() for word in word_tokenize(text)]\n\n # tag words as verb, noun etc\n tagged_words = pos_tag(tokens)\n\n # retrieve list of boring words from file\n stopwords_file = os.path.join(BASE_DIR, 'data', 'stopwords.txt')\n with open(stopwords_file, 'r', encoding='utf-8') as f:\n stopwords = [line.rstrip(linesep) for line in f]\n \n #We don't want keywords to contain anything in this list\n forbidden = ['.',',',';',':','?','!','+',')','(','[',']','/','<','>','\"','©','1','2','3','4','5','6','7','8','9','0']\n\n # NLTK Chunking - detects noun phrases and phrases of form verb noun or adj noun\n patterns = \"\"\"NP: {<JJ>*<NN><NNS>}\n {<JJR><NNS>}\n {<JJ>*<NNS>}\n {<NN><NNS>} \n {<JJ><NNS>}\n {<JJ>*<NN>*}\n {<NN>*}\n {<NNS>*}\"\"\"\n chunker = RegexpParser(patterns)\n chunks = chunker.parse(tagged_words)\n\n #these are the phrases we want, as lists within a list\n validphrases = []\n for t in chunks.subtrees():\n if t.label() == 'NP':\n validphrases.append([x for x,y in t.leaves()])\n\n #turning lists within lists into actual noun phrases i.e [[radiation], [breast,cancer]] becomes [radiation, breast cancer]\n #sorry for my horrible code\n #trees suck\n lemmatizables = []\n for sublist in validphrases:\n lemmatizables.append(' '.join(sublist))\n\n lemmatizer = WordNetLemmatizer()\n lems = [lemmatizer.lemmatize(x) for x in lemmatizables]\n\n #removing stopwords after lemmatizinga, then removing anything containing punctuation or a number\n lems = filter(lambda lem: lem not in stopwords, lems)\n lems = filter(lambda lem: not any(char in lem for char in forbidden), lems)\n\n return tuple(lems)", "def haiku_string_parser():\n pass", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def nlp_parse(self, input):\n resp = {}\n resp['type'] = 'nomatch'\n VDB_set = {}\n WP_set = {}\n tagset = self.build_tagset(input)\n resp['words'] = self.build_keywords(tagset)\n w = resp['words']\n\n if not w:\n if constants.DEBUG:\n log.debug(\"No words: \" + str(resp))\n return resp\n\n # store nouns\n NN_set = set(w.get('NN', []))\n\n # matches a request for a list\n if 'list' in NN_set \\\n or 'List' in w.get('NNP', []):\n resp['count'] = w.get('CD', [constants.LIST_COUNT])[0]\n resp['type'] = 
'show-list'\n if set(['serving', 'serve']) & set(w.get('VBG', [])):\n resp['meal'] = (NN_set & constants.MEALS_SET).pop()\n if 'in' in w.get('IN', []):\n resp['zone'] = w.get('NNP', [None])[0]\n if 'close' in w.get('VBD', []) \\\n or 'close' in w.get('JJ', []) \\\n or 'close' in NN_set:\n resp['distance'] = True\n return resp\n\n # finds neighborhood\n for word in tagset:\n if word[1] == 'VBD':\n VDB_set = word[0]\n for word in tagset:\n if word[1] == 'WP':\n WP_set = word[0]\n if 'neighborhood' in VDB_set and 'what' in WP_set:\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-zone'\n return resp\n\n # matches \"how expensive it is\" and \"is it expensive\"\n if 'expensive' in w.get('JJ', ()):\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-price'\n return resp\n\n if 'between' in w.get('IN', ()) \\\n or 'price' in NN_set:\n price_range = w.get('CD', ())\n\n # price between a and b\n # require at least 2 numerals\n if len(price_range) >= 2:\n resp['min'] = min(map(int, price_range))\n resp['max'] = max(map(int, price_range))\n resp['type'] = 'list-price-range'\n return resp\n\n # price of exactly a\n if len(price_range) > 0:\n price_range = w.get('CD', ())\n resp['price'] = min(price_range)\n resp['type'] = 'list-price-single'\n return resp\n\n\n # need to merge NN and JJ for this step\n w['NNJJ'] = NN_set | set(w.get('JJ', []))\n meal = constants.MEALS_SET & w['NNJJ']\n if meal:\n resp['type'] = 'list-meal-single'\n resp['meal'] = meal.copy().pop()\n return resp\n\n # matches a quality list\n if 'quality' in NN_set and \\\n (constants.QUALITIES & w['NNJJ']) and \\\n (set(['food', 'service']) & w['NNJJ']):\n resp['degree'] = (constants.QUALITIES \\\n & w['NNJJ']).pop()\n resp['type'] = 'list-quality-' + \\\n (set(['food', 'service']) & w['NNJJ']).pop()\n return resp\n\n # matches a phone number request\n if NN_set & constants.PHONE_KEYWORDS:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-phone'\n return resp\n\n # matches a single meal request\n if NN_set & constants.MEALS_SET:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-meal'\n resp['meal'] = word.lower()\n return resp\n\n # matches a request for an address\n if 'address' in NN_set:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n resp['restaurant'] = r_name\n resp['type'] = 'name-location'\n return resp\n\n # matches a restaurant in neighborhood\n if 'in' in w.get('IN', []) and \\\n NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-city'\n resp['city'] = string.capitalize(r_name)\n return resp\n\n # matches a request for a cuisine type\n if NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-cuisine'\n resp['cuisine'] = string.capitalize(r_name)\n return resp\n\n # merge all numerals together for list-mode\n w['CDLS'] = set(w.get('CD', []) + w.get('LS', []))\n if w['CDLS']:\n w_copy = w['CDLS'].copy()\n while w_copy:\n 
try:\n resp['listitem'] = int(w_copy.pop())\n resp['type'] = 'single-listitem'\n return resp\n except:\n pass\n\n # distance / how far\n if ('far' in w.get('RB', [])\n and 'how' in w.get('WRB', [])\n ) or ('distance' in NN_set):\n r = w.get('NNP', [None])[0]\n if r:\n resp['type'] = 'name-distance'\n resp['restaurant'] = string.capitalize(r)\n return resp\n\n if constants.DEBUG:\n log.debug(resp)\n return resp", "def extract_keywords(raw_text,id):\n\n print(\"Extracting keywords for \"+id)\n\n stemmer = nltk.PorterStemmer()\n\n # Construct text\n\n # Tokens\n tokens = nltk.word_tokenize(raw_text)\n # filter undesirable words and format\n words = [w.replace('\\'','') for w in tokens if len(w)>=3]\n text = nltk.Text(words)\n\n tagged_text = nltk.pos_tag(text)\n #nouns = [tg[0] for tg in tagged_text if tg[1]=='NN' or tg[1]=='NNP' ]\n #print(nouns)\n\n # multi-term\n multiterms = set()\n stem_dico = {}\n for i in range(len(tagged_text)) :\n # max length 4 for multi-terms ==> 3\n for l in range(1,4) :\n if i+l < len(tagged_text) :\n tags = [tagged_text[k] for k in range(i,i+l)]\n if potential_multi_term(tags) :\n multistemlist = [str.lower(stemmer.stem(tagged_text[k][0])) for k in range(i,i+l)]\n #multistem.sort(key=str.lower)\n\t\t #python 3 : remove .encode('ascii','ignore')\n multistem = functools.reduce(lambda s1,s2 : s1+' '+s2,multistemlist)\n rawtext = functools.reduce(lambda s1,s2 : s1+' '+s2,[str.lower(tagged_text[k][0]) for k in range(i,i+l)])\n multiterms.add(multistem)\n if multistem in stem_dico :\n stem_dico[multistem].add(rawtext)\n else :\n stem_dico[multistem] = set([rawtext])\n\n return [list(multiterms),stem_dico]", "def __init__ (self, languageFilename):\n if not isinstance(languageFilename, str): # Checks if the filename is entered as a string.\n raise TypeError('The filename must be a string')\n self._words = set()\n try:\n with open(languageFilename) as data:\n line = data.readline()\n while line:\n line = line.rstrip()\n self._words.add(line)\n line = data.readline()\n except IOError:\n print('Please specify the correct name for the dictionary')", "def load_cmudict():\n with open(\"text/en/cmudict-0.7b.txt\", encoding=\"ISO-8859-1\") as file_reader:\n cmudict = (line.strip().split(\" \") for line in islice(file_reader, 126, 133905))\n\n cmudict = {format_alt_entry(word): pronunciation for word, pronunciation in cmudict}\n\n return cmudict", "def parse(program):\n return read_from_tokens(tokenize(program))", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n self._keywords.extend(key.split(\" \"))\n return", "def __init__(self, filename):\n\n self.term_dict = 
{}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def load_ud_english(fpath):\n import os\n import re\n from collections import defaultdict\n n = 1\n\n fname = os.path.split(fpath)[1]\n\n parses = defaultdict(list)\n sent_ids = []\n newdoc_ids = []\n \n for l in open(fpath):\n ident = fname+' '+str(n)\n \n if re.match(r'\\# newdoc id', l):\n newdoc_ids.append(n)\n #newdoc_ids.append(l.split(\"=\")[-1].strip())\n \n if re.match(r'^\\d', l):\n l_split = l.strip().split()\n parses[ident].append(l_split)\n \n elif parses[ident]:\n sent_ids.append(ident)\n n += 1\n\n return newdoc_ids, len(sent_ids)", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def __init__(self):\n for item in grammar:\n item['matches_compiled'] = {}\n for name,pattern in item['matches'].items():\n item['matches_compiled'][name] = \\\n re.compile(pattern, re.IGNORECASE)\n\n item['semantics_compiled'] = {}\n for name,pattern in item['semantics'].items():\n item['semantics_compiled'][name] = \\\n re.compile(pattern)\n\n if constants.SPELLCHECK:\n self.didyoumean = DidYouMean('en-us', constants.DICT_DIR)", "def get_required_keywords_from_original():\n required_keywords = {}\n f = open('required_keywords.txt', 'r')\n curr_instrument = \"\"\n for line in f:\n if line[-2:] == \":\\n\":\n instrument = line[:-2]\n curr_instrument = instrument\n if instrument not in required_keywords.keys():\n required_keywords[instrument] = {}\n #print (line[:-2])\n elif line == \"\\n\":\n pass\n else:\n line = re.sub('[(),\\'|]', '', line)\n line = re.sub('\\.', ' ', line)\n new_line = line.split(' ')\n final_line = []\n final_line.append(new_line[0])\n for l in range(1,len(new_line)):\n temp_word = str(new_line[l][:8])\n temp_word = re.sub('\\n','',temp_word)\n if temp_word not in final_line:\n final_line.append(temp_word)\n required_keywords[curr_instrument][final_line[0]] = final_line[1:]\n more_required = ['REFTYPE', 'DESCRIP', 'AUTHOR', 'PEDIGREE', 'HISTORY']\n for k,v in required_keywords.iteritems():\n path = 'required_keywords/' + k + '_required_keywords.csv'\n with open(path, 'wb') as csvfile:\n keywriter = csv.writer(csvfile, delimiter=' ', quotechar='|',quoting=csv.QUOTE_MINIMAL)\n for key,value in v.iteritems():\n keywriter.writerow([key]+value + more_required)", "def init() -> None:\n init_dict()\n parse_file(\"alphabet.txt\", letters)\n parse_file(\"numbers.txt\", numbers)\n parse_file(\"symbols.txt\", symbols)", "def _read_in_keyword_file(keyword_file_path):\r\n inf = codecs.open(keyword_file_path, 'r', 'utf-8')\r\n k = json.load(inf)\r\n inf.close()\r\n return k", "def lookup_keywords(filename):\n keywords = []\n start_of_table = r'\\*+\\s+'\n start_of_kw_table = r'\\*+\\s+Keyword'\n in_kw_table = False\n f = open(filename, \"r\")\n for line in f.readlines():\n line = line.rstrip()\n if len(line) == 0 or line.startswith(\"#\"):\n continue # skip comments and blanks\n if re.match(start_of_kw_table, line):\n in_kw_table = True # table started\n continue\n if re.match(start_of_table, line) and not re.match(start_of_kw_table, line):\n in_kw_table = False # table ended\n continue\n if 
line.startswith(' '):\n continue # skip content rows\n if in_kw_table:\n keywords.append(line)\n f.close()\n return keywords", "def parse_uniprotkb(indices_from_pfam_id):\n # Parses Uniprot dat file, keeping IDs and sequences of entries present in\n # Pfam-A seed.\n uniprot_ids, uniprot_sequences = [], []\n for input_file in FLAGS.input_file:\n with tf.io.gfile.GFile(input_file, 'rb') as f:\n line_reader = buffered_line_reader.BufferedLineReader(\n f, sep=SEP, buffer_size=BUFFER_SIZE)\n for entry in line_reader:\n id_line, entry = entry.split('\\n', 1)\n g = ID_REGEX.match(id_line)\n # Skips malformed / incomplete entries.\n if g is not None:\n uniprot_id, seq_len = g.group(1), int(g.group(2))\n # Parses sequence data lines iff the entry is part of Pfam-A seed.\n if uniprot_id in indices_from_pfam_id:\n seq_entry = entry.split('SQ SEQUENCE', 1)[-1]\n seq_entry = seq_entry.split('\\n', 1)[-1]\n uniprot_sequence = ''.join([line.strip().replace(' ', '')\n for line in seq_entry.split('\\n')])\n if len(uniprot_sequence) != seq_len:\n raise ValueError(\n f'Length for entry {uniprot_id} ({len(uniprot_sequence)}) '\n f'does not match ID line ({seq_len})!')\n uniprot_ids.append(uniprot_id)\n uniprot_sequences.append(uniprot_sequence)\n logging.info(\n 'Found %d matching entries in %s (%d unique).',\n len(uniprot_ids), ', '.join(FLAGS.input_file), len(set(uniprot_ids)))\n return uniprot_ids, uniprot_sequences", "def main ():\n\n\tfio = fileIo('input.txt')\n text = fio.getInput()\n\n\tp = re.compile(r'#?\\d[\\s\\.]?[\\s]?')\n\tout = filter(None, p.split(text))\n\ti = 0\n\tlistOfLists = []\n\t\n\n\tfor s in out:\n\t\ti += 1\n\t\ttext = nltk.word_tokenize(s)\n\t\tpos = nltk.pos_tag(text)\n\t\tpattern = \"NP: {<DT>?<JJ>*<NN>}\"\n\t\tNPChunker = nltk.RegexpParser(pattern)\n\t\tresult = NPChunker.parse(pos)\n\t\tlistOfLists.append( result )\n\n\tprint \"Noun Count:\\n\" + str(countNouns( listOfLists ))\n\tprint \"Verb Count:\\n\" + str(countVerbs( listOfLists ))\n\tprint \"Adjective Count:\\n\" + str(countAdjectives( listOfLists ))", "def process_query(file: TextIO) -> 'NGOMatch':\n query_dict = {}\n query_dict['skills'] = {}\n query_dict['interest'] = []\n line = file.readline().strip()\n \n query_dict['skills']['technical'] = []\n query_dict['skills']['interpersonal'] = []\n \n line = file.readline().strip()\n line = file.readline().strip()\n while line != 'Interpersonal':\n query_dict['skills']['technical'].append(line)\n line = file.readline().strip() \n \n line = file.readline().strip()\n while line != 'INTEREST':\n query_dict['skills']['interpersonal'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip() \n while line != 'NUMBER':\n query_dict['interest'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != 'SORT':\n query_dict['number'] = line\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != '':\n if line[:5] == 'skill':\n query_dict['sort-by']['skill'] = line[5:].strip()\n if line [:8] == 'interest':\n query_dict['sort-by']['interest'] = line[8:].strip()\n line = file.readline().strip()\n \n return query_dict", "def ReadMorphit(self):\r\n self.words = {}\r\n with codecs.open(self.MorphItFileName, 'r', 'utf-8') as f:\r\n for line in f.readlines():\r\n line = line.split()\r\n try:\r\n# print (line)\r\n self.words[line[0]] = line[2][:3]\r\n# if line[2][:3] in self.verbTags:\r\n# line[2]=line[2].split(u'+')\r\n# line[2][0]=line[2][0][line[2][0].find(u':')+1:]\r\n except:\r\n pass\r\n return 
self.words", "def load_objects(self):\n \n # Load classifier\n with open('../twitterClass/classifier/classifier.p','r') as f:\n self.classifier = cPickle.load(f)\n \n #Load blocked keywords\n regex_str2 = []\n with open('../twitterClass/twitterMiningClass/private/blocked_keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n print key\n regex_str2.append(key[1])\n # create regex compiler for blocked keyword search\n regex_str2 = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str2)\n self.blocked_keywords_re = re.compile(r'('+'|'.join(regex_str2)+')',re.IGNORECASE)\n \n # Load keywords\n with open('../twitterClass/twitterMiningClass/private/keywords.txt','r') as f:\n keywords = f.read().split('\\n')\n for key in keywords:\n key = key.split(',')\n #key[0] = keyword name , key[1] = pattern\n self.keywords[key[0]] = key[1]\n # create regex compiler for keyword search\n regex_str = []\n for keys,pattern in self.keywords.iteritems():\n regex_str.append(pattern)\n regex_str = map(lambda x: x.replace(\"\\\\\\\\\",\"\\\\\"),regex_str)\n self.keywords_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)", "def SetupKeywords(self):\n kwlist = u\" \".join(self._keywords)\n self.SetKeyWords(0, kwlist)", "def scan(self):\n self.tokfile = open(self.tokfile_path, 'w')\n word = ''\n for line in open(self.srcfile):\n for ch in line:\n if ch in alphanum: \n word += ch\n else:\n if word:\n try:\n self.print_tok('$int', int(word))\n except ValueError:\n if word in self.reserved: \n self.print_tok('$' + word)\n else:\n self.print_tok('$id', word)\n if ch in special:\n self.print_tok(ch)\n word = ''\n self.tokfile.close()", "def __init__(self):\n\n self._LDAU_KEYS = ['LDAUTYPE', 'LDAUPRINT', 'MAGMOM', 'LDAUL', 'LDAUJ', 'LDAUU', 'LDAU'] \n\n self.structure_has_been_read = False", "def load_conll_notags(unfile, max_slen, vocab=[], oovs={}, pads={}, lower=False, mwe=True, unk_case=True):\n # special characters used for splitting words\n split_chars = set([',', '.', ':', '-', '~', \"'\", '\"'])\n\n # punctuation that denotes when a sentence finishes\n sent_split_words = set(['.', '?', '!', ';', '—'])\n\n input_sents = []\n input_words = []\n windex = -1\n\n # number of words from which to split sentences\n LIMIT_SENT_LEN = max_slen\n\n sents = []\n if 'begin' in pads:\n next_words = [pads['begin']]\n next_syms = ['']\n next_indexs = [windex]\n sent_base_length = 1\n else:\n next_words = []\n next_syms = []\n next_indexs = []\n sent_base_length = 0\n\n # select files to use\n input_files = [unfile]\n\n # counters\n num_raw_sents = 0\n num_sents = 0\n num_words = 0\n num_oovs = 0\n\n # iterate over lines in the input files\n for ifile in input_files:\n for line in codecs.open(ifile, mode = 'r', errors = 'ignore', encoding = 'utf-8'):\n # discard newline character\n line = line[:-1]\n\n # keep adding words while in the middle of a sentence\n if line:\n word = line.split('\\t')[0]\n sym = word\n # add new original word\n windex += 1\n input_words.append(word)\n num_words += 1\n # lowercase when indicated\n if lower:\n word = word.lower()\n # use an heuristic and try to map oov words\n if vocab and word not in vocab:\n if word not in split_chars:\n if re.match('^[0-9\\.\\,-]+$', word):\n word = oovs['number']\n elif _match_word_vocab(word, vocab) != word:\n word = _match_word_vocab(word, vocab)\n elif ' ' in word or '~' in word or '-' in word and mwe:\n # attempt to split multi-word expressions\n 
constituents_text = re.split('[\\s~ | \\s-]+', word)\n constituents = [_match_word_vocab(w, vocab) for w in constituents_text]\n if all([True if c in vocab else False for c in constituents]):\n next_words += constituents[:-1]\n next_syms += constituents[:-1]\n next_indexs += [windex] * len(constituents[:-1])\n word = constituents[-1]\n sym = constituents[-1]\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n if unk_case and word[0].isupper():\n word = oovs['UNKNOWN']\n else:\n word = oovs['unknown']\n num_oovs += 1\n else:\n word = oovs['unknown']\n num_oovs += 1\n\n next_words.append(word)\n next_syms.append(sym)\n next_indexs.append(windex)\n\n # stack the current sentence upon seeing an empty line or a sentence end mark\n if not line or (len(next_words) > 3 and next_words[-4] in sent_split_words) or (len(next_words) >= LIMIT_SENT_LEN and len(sent_split_words.intersection(next_words)) < 1):\n if len(next_words) > sent_base_length:\n # split when an empty line marks a sentence end\n if not line:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n next_words = []\n next_syms = []\n next_indexs = []\n num_raw_sents += 1\n num_sents += 1\n # split when punctuation marks a sentence end\n elif len(next_words) > 3 and next_words[-4] in sent_split_words:\n split_words = next_words[:-3]\n split_syms = next_syms[:-3]\n split_indexs = next_indexs[:-3]\n if 'end' in pads:\n split_words.append(pads['end'])\n split_syms.append('')\n split_indexs.append(-1)\n sents.append(list(zip(split_words, split_indexs, split_syms)))\n next_words = next_words[-3:]\n next_syms = next_syms[-3:]\n next_indexs = next_indexs[-3:]\n num_sents += 1\n # split when the maximum sentence length is reached\n # a bad guess is better than not guessing when predicting tags\n else:\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n next_words = []\n next_syms = []\n next_indexs = []\n num_sents += 1\n\n if 'begin' in pads:\n next_words = [pads['begin']] + next_words\n next_syms = [''] + next_syms\n next_indexs = [-1] + next_indexs\n\n else:\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n\n # double check the last sentence\n if len(next_words) > sent_base_length:\n if 'end' in pads:\n next_words.append(pads['end'])\n next_syms.append('')\n next_indexs.append(-1)\n sents.append(list(zip(next_words, next_indexs, next_syms)))\n input_sents.append(input_words)\n input_words = []\n windex = -1\n num_raw_sents += 1\n num_sents += 1\n\n # find the allowed sentence length\n print('[INFO] Number of unlabelled OOV words: ' + str(num_oovs) + ' / ' + str(num_words))\n print('[INFO] Original number of unlabelled sentences: ' + str(num_raw_sents))\n print('[INFO] Number of extracted unlabelled sentences ' + str(num_sents))\n return input_sents, sents", "def read_dictionary():\n # model = 'en_core_web_sm'\n # model = 'en_core_web_md'\n # model = 'en_core_web_lg'\n model = 'en' # Using 'en' instead of 'en_core_web_md', as the latter has many words without vector data. 
Check!\n print(\"Starting to read the model:\", model)\n # nlp = spacy.cli.download(model) # Run this for the first time on a new server.\n nlp = spacy.load(model) # Use this for subsequent runs\n return nlp", "def ParseArgs() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--input', dest='input', help='Japanese phonetic reading file')\n parser.add_argument(\n '--output_token_array',\n dest='output_token_array',\n help='Output token array file.')\n parser.add_argument(\n '--output_string_array',\n dest='output_string_array',\n help='Output string array file.')\n return parser.parse_args()", "def read_language(filehandle):\n from bs4 import BeautifulSoup as bs\n soup = bs(filehandle.read())\n primary_name = soup.find(\"meta\", property=\"og:title\")[\"content\"]\n alternate_names = soup.find(\"div\", class_=\"field-name-field-alternate-names\" ).find(\"div\", class_=[\"field-item\", \"even\"]).string.split(\", \")\n classification = soup.find(\"div\", class_=\"field-name-language-classification-link\").find(\"div\", class_=[\"field-item\", \"even\"]).string.split(\", \")\n dialects = soup.find(\"div\", class_=\"field-name-field-dialects\" ).find(\"div\", class_=[\"field-item\", \"even\"]).p.get_text()\n return ([unicode(primary_name)]+alternate_names, classification, dialects)", "def __init__(self, stopwordsfile='', \n\t\t\t\t\t\tpunctuationfile='', \n\t\t\t\t\t\tdictionaryfile='',\n\t\t\t\t\t\tapostrophefile='',\n\t\t\t\t\t\tarticlefile=''):\n\n\t\tself._stopwords = set(['a', 'able', 'about', 'across', 'after', \n\t\t\t\t\t\t\t\t'all', 'almost', 'also', 'am', 'among', 'an', \n\t\t\t\t\t\t\t\t'and', 'any', 'are', 'as', 'at', 'be', \n\t\t\t\t\t\t\t\t'because', 'been', 'but', 'by', 'can', 'come', \n\t\t\t\t\t\t\t\t'cannot', 'could', 'dear', 'did', 'do', 'does',\n\t\t\t\t\t\t\t\t'either', 'else', 'ever', 'every', 'for', \n\t\t\t\t\t\t\t\t'from', 'get', 'got', 'had', 'has', 'have', \n\t\t\t\t\t\t\t\t'he', 'her', 'hers', 'him', 'his', 'how', \n\t\t\t\t\t\t\t\t'however', 'i', 'if', 'in', 'into', 'is', 'it',\n\t\t\t\t\t\t\t\t'its', 'just', 'least', 'let', 'like', \n\t\t\t\t\t\t\t\t'likely', 'may', 'me', 'might', 'most', 'must',\n\t\t\t\t\t\t\t\t'my', 'neither', 'no', 'nor', 'not', 'of', \n\t\t\t\t\t\t\t\t'off', 'often', 'on', 'only', 'or', 'other', \n\t\t\t\t\t\t\t\t'our', 'own', 'rather', 'said', 'say', 'says',\n\t\t\t\t\t\t\t\t'she', 'should', 'since', 'so', 'some', 'than',\n\t\t\t\t\t\t\t\t'that', 'the', 'their', 'them', 'then', \n\t\t\t\t\t\t\t\t'there', 'these', 'they', 'this', 'those', 'tis', 'to', \n\t\t\t\t\t\t\t\t'too', 'twas', 'us', 'wants', 'was', 'we', \n\t\t\t\t\t\t\t\t'were', 'what', 'when', 'where', 'which', \n\t\t\t\t\t\t\t\t'while', 'who', 'whom', 'why', 'will', 'with', \n\t\t\t\t\t\t\t\t'would', 'yet', 'you', 'your'])\n\n\t\tself._punctuation = ['!', \"\\\"\", '#', '\\$','%','&',\"'\",'\\(','\\)','\\*','\\+',\n\t\t\t\t\t\t',', '-','\\.','/',\"\\\\\\\\\",':',';','\\<','\\=','\\>','\\?',\n\t\t\t\t\t\t'@','\\[', '\\|','\\]','\\^','_','`','{','}','~','¡','¿',\n\t\t\t\t\t\t'—','–','…','�', '”','“','‘','’','´','¯','•','→','®']\n\t\tself._articles = ['a', 'an', 'and', 'the']\n\t\tself._apostrophe = [\"'\", \"‘\",\"’\"]\n\n\n\t\tif stopwordsfile.strip() != '':\n\t\t\tself._stopwords = set([w.strip().lower() \n\t\t\t\t\t\t\t\t\tfor w in open(stopwordsfile, 'r')])\n\n\t\tif dictionaryfile.strip() != '':\n\t\t\tself._dictionary = set([w.strip().lower() \n\t\t\t\t\t\t\t\t\tfor w in open(dictionaryfile, 'r')])\n\n\t\tif articlefile.strip() != 
'':\n\t\t\tself._articles = [w.strip() \n\t\t\t\t\t\t\tfor w in open(articlefile, 'r').readlines()]\n\n\t\tif punctuationfile.strip() != '':\n\t\t\tself._punctuation = [w.strip() \n\t\t\t\t\t\t\tfor w in open(punctuationfile, 'r').readlines()]\n\n\t\tif apostrophefile.strip() != '':\n\t\t\tself._apostrophe = [w.strip() \n\t\t\t\t\t\t\tfor w in open(apostrophefile, 'r').readlines()]\n\n\t\tself._compileRegex()", "def test_grammar_parse():\n print u\"%s: Grammar test\" % (__file__, )\n print u\"Deriving grammar from parsed TIGER corpus sentences\"\n #tiger_corpus = TigerCorpusReader()\n tiger_corpus = _cached(None, CORPUS_PATH, TigerCorpusReader)\n grammar_parser = tiger_corpus.viterbi_parser(False)\n grammar_parser.trace()\n\n text = nltk.word_tokenize(u\"Der Hase springt über den Baum, der sehr hoch gewachsen ist.\")\n #text = nltk.word_tokenize(u\"Der kleine gelbe Hund beobachtete die Katze.\")\n text = nltk.word_tokenize(u\"Der kleine Hund blickte zu der Katze.\")\n print u\"Parsing unknown text\"\n try:\n tree = grammar_parser.parse(text)\n if tree:\n tree.draw()\n print u\"Printing parse tree for text...\"\n print unicode(tree)\n except ValueError as e:\n print u\"Input contains words not known by grammar!\"\n print u\"%s\" % e", "def process_tags(filename):\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\n for _, element in ET.iterparse(filename):\n keys = key_type(element, keys)\n\n return keys", "def readtags(fname):\n datas = {}\n muts = None\n fields = ['album', 'artist', 'title', 'genre', 'date', 'tracknumber',\n 'year']\n try:\n muts = mutagen.File(fname, easy=True)\n except:\n logger.error(\"can't read tags in [%s]\" % fname)\n\n if muts:\n for fld in fields:\n if fld in muts:\n data = muts[fld]\n datas[fld] = data[0]\n else:\n datas[fld] = ''\n\n return datas", "def read(file_):\n if not os.path.isfile(file_):\n raise AssertionError()\n\n dict_ = {}\n for line in open(file_).readlines():\n\n list_ = shlex.split(line)\n\n is_empty = (list_ == [])\n\n if not is_empty:\n is_keyword = list_[0].isupper()\n else:\n is_keyword = False\n\n if is_empty:\n continue\n\n if is_keyword:\n keyword = list_[0]\n dict_[keyword] = {}\n continue\n\n process(list_, dict_, keyword)\n\n dict_ = auxiliary(dict_)\n\n # We perform some basic consistency checks regarding the user's request.\n check_initialization_dict(dict_)\n\n return dict_", "def main():\n file = \"http://icarus.cs.weber.edu/~hvalle/hafb/words.txt\"\n words = fetch_words(file)\n print_items(words)", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def populate_keywords(kwds, pkg_id):\n if not kwds:\n return\n for word in kwds:\n # @todo(Check data and use the special character-list\n # variable in the constants' file.)\n word = word.strip(\".:;=-,\\\"'\\n $_%{}()[]^*?& +#`\").lower()\n if len(word) <= 1 or (word in constants.STOP_WORDS) or \\\n has_special_chars(word):\n continue\n insert_keyword(word, pkg_id)", "def main():\t\n\t# read in short term auth\n\tf = open('./input.txt', 'r')\n\tshort_term_auth = f.read()\n\n\tlong_term_access_token = long_term_token(short_term_auth)\n\tprint(long_term_access_token)\n\tprint('run program like normal now')", "def process(self, location):\n with open(location, 'r') as fil:\n \n for line in fil:\n split = line.split('|')\n umls_cui, language, kb, term_type, kb_cui, kb_name = self.__getConcepts(split)\n self.__addCUI(umls_cui, language, kb, term_type, kb_cui, kb_name)", "def _read_nrc_hshtag(a_dname):\n fields = None\n term = tclass = \"\"\n 
pos = set(); neg = set()\n print(\"Reading NRC-Hashtag-Sentiment-Lexicon-v0.1... \", end = \"\", file = sys.stderr)\n for fname in [\"sentimenthashtags.txt\"]: # skip bigrams for the time being\n fname = os.path.join(a_dname, fname)\n with codecs.open(fname, 'r', ENCODING) as ifile:\n for iline in ifile:\n iline = iline.strip()\n if not iline:\n continue\n term, tclass = TAB_RE.split(iline)\n term = _cleanse(term)\n if not term:\n continue\n if tclass == NRC_POSITIVE:\n pos.add(term)\n elif tclass == NRC_NEGATIVE:\n neg.add(term)\n print(\"done\", file = sys.stderr)\n return (pos, neg)", "def get_keywords_for_movie(url):\n pass", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def demo_legacy_grammar():\n from nltk.grammar import parse_fcfg\n\n g = parse_fcfg(\"\"\"\n % start S\n S[sem=<hello>] -> 'hello'\n \"\"\")\n print \"Reading grammar: %s\" % g\n print \"*\" * 20\n for reading in batch_interpret(['hello'], g, semkey='sem'):\n syn, sem = reading[0]\n print\n print \"output: \", sem", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"infile\", help=\"Text file to be analyzed.\")\n args = parser.parse_args()\n with open(args.infile, encoding=\"utf-8\") as f:\n text = f.read()\n words = text.split()\n unique_words(words)", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. 
if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? 
matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS 
and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def keywords_pattern():\n with open(\"keywords.txt\", 'r') as f:\n lines = [line.strip() for line in f if line.strip()]\n return set(lines)", "def load_typos_moe(file_name):\n \n # log = logging.getLogger(\"robust_ner\")\n\n file_path = os.path.join(f\"resources/typos/\", f\"{file_name}\")\n\n typos = dict()\n for line in open(file_path):\n line = line.strip().split()\n\n if len(line) != 2:\n #log.warning(f\"len(line) = {len(line)} != 2 (line: {line})\")\n continue\n\n value = line[0]\n key = line[1]\n\n #print(key, value)\n \n if key not in typos:\n typos[key] = list()\n\n typos[key].append(value)\n \n return typos", "def process_data_from_input_file(triplet):\n\n sentence = triplet.subject + ' ' + triplet.predicate + ' ' + triplet.object\n doc = nlp(unicode(sentence))\n root = doc[0]\n for t in doc:\n if t.pos_ == 'VERB' and t.head == t:\n root = t\n # elif t.pos_ == 'NOUN'\n\n # also, if only one sentence\n # root = doc[:].root\n\n\n \"\"\"\n CURRENT ASSUMPTIONS:\n - People's names are unique (i.e. 
there only exists one person with a certain name).\n - Pet's names are unique\n - The only pets are dogs and cats\n - Only one person can own a specific pet\n - A person can own only one pet\n \"\"\"\n\n\n # Process (PERSON, likes, PERSON) relations\n if root.lemma_ == 'like':\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and triplet.object in [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG'] and \"n't\" not in triplet.predicate:\n s = add_person(triplet.subject)\n o = add_person(triplet.object)\n s.likes.append(o)\n\n if root.lemma_ == 'be' and triplet.object.startswith('friends with'):\n fw_doc = nlp(unicode(triplet.object))\n with_token = [t for t in fw_doc if t.text == 'with'][0]\n # get text after with\n after_with = fw_doc.text.split(with_token.text+ ' ')[1]\n people = []\n for p in after_with.split(' '):\n if nlp(p)[0].tag_ == 'NNP':\n people.append(nlp(p)[0].text)\n # fw_who = [t for t in with_token.children if t.dep_ == 'pobj'][0].text\n # fw_who = [e for e in fw_doc.ents if e.label_ == 'PERSON'][0].text\n for p in people:\n if triplet.subject in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(triplet.subject)\n o = add_person(p)\n s.likes.append(o)\n o.likes.append(s)\n if root.lemma_ == 'be' and triplet.object == 'friends':\n fw_doc = nlp(unicode(triplet.subject))\n and_token = [t for t in fw_doc if t.text == 'and']\n if and_token:\n and_token = and_token[0].text\n if and_token == 'and' and fw_doc[0].text in [e.text for e in doc.ents if e.label_ == 'PERSON'] and fw_doc[2].text in [e.text for e in doc.ents if e.label_ == 'PERSON']:\n s = add_person(fw_doc[0].text)\n o = add_person(fw_doc[2].text)\n s.likes.append(o)\n o.likes.append(s)\n\n # Process (PET, has, NAME) Mary's dog's name is Rover\n if triplet.subject.endswith('name') and ('dog' in triplet.subject or 'cat' in triplet.subject):\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n\n # handle single names, but what about compound names? 
Noun chunks might help.\n if (len(obj_span) == 1 or len(obj_span) == 2) and obj_span[-1].pos_ == 'PROPN':\n name = triplet.object\n subj_start = sentence.find(triplet.subject)\n subj_doc = doc.char_span(subj_start, subj_start + len(triplet.subject))\n\n s_people = [token.text for token in subj_doc if token.ent_type_ == 'PERSON']\n assert len(s_people) == 1\n s_person = select_person(s_people[0])\n\n pet = get_persons_pet(s_person.name)\n\n pet.name = name\n s_person.has.append(pet)\n\n # Process (Who has dog)\n if root.lemma_ == 'have'and ('dog' in triplet.object or 'cat' in triplet.object):\n # find pets name and instantiate name empty str\n obj_span = doc.char_span(sentence.find(triplet.object), len(sentence))\n name = ''\n\n if obj_span[-1].pos_ == 'PROPN':\n name = obj_span[-1].text\n s = add_person(triplet.subject)\n s_pet_type = 'dog' if 'dog' in triplet.object else 'cat'\n pet = add_pet(s_pet_type, name)\n s.has.append(pet)\n\n date = [e.text for e in doc.ents if e.label_ == 'DATE']\n gpe = [e.text for e in doc.ents if e.label_ == 'GPE']\n person = [e.text for e in doc.ents if e.label_ == 'PERSON' or e.label_ == 'ORG']\n # if person and GPE exists, we add it into trip(departs_on, departs_to)\n if person and (gpe or date):\n s = add_person(triplet.subject)\n o = add_trip(date, gpe)\n s.travels.append(o)", "def __init__(self, multiword_detector):\r\n\r\n # Save the multiword detector\r\n self.multiword_detector = multiword_detector\r\n\r\n # Initialize Leet Speak Detector\r\n self.leet_detector = LeetDetector(self.multiword_detector)\r\n\r\n ## Used for debugging/statistics\r\n #\r\n # These numbers won't add up to total passwords parsed since\r\n # some passwords might have multiple \"base words\". For example\r\n # \"pass1pass\" would be counted as two single words. 
Likewise,\r\n # \"123456\" would have no words\r\n #\r\n self.num_single_words = 0\r\n self.num_multi_words = 0\r\n\r\n # Keep track of the number of leet replacements detected\r\n self.num_leet = 0\r\n\r\n ## The following counters keep track of global running stats\r\n #\r\n self.count_keyboard = {}\r\n self.count_emails = Counter()\r\n self.count_email_providers = Counter()\r\n self.count_website_urls = Counter()\r\n self.count_website_hosts = Counter()\r\n self.count_website_prefixes = Counter()\r\n self.count_years = Counter()\r\n self.count_context_sensitive = Counter()\r\n self.count_alpha = {}\r\n self.count_alpha_masks = {}\r\n self.count_digits = {}\r\n self.count_other = {}\r\n self.count_base_structures = Counter()\r\n self.count_raw_base_structures = Counter()\r\n self.count_prince = Counter()", "def main(keywords_file):\n try:\n # prepare credentials for accessing twitter API\n consumer_key = os.environ.get('CONSUMER_KEY')\n consumer_secret = os.environ.get('CONSUMER_SECRET')\n access_token = os.environ.get('ACCESS_TOKEN')\n access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')\n\n if (consumer_key is None or consumer_secret is None or\n access_token is None or access_token_secret is None):\n raise EnvironmentError('Missing twitter API credentials.')\n api = auth(consumer_key=consumer_key,\n consumer_secret=consumer_secret,\n access_token=access_token,\n access_token_secret=access_token_secret)\n\n db_name = os.environ.get('DB_NAME')\n if db_name is None:\n raise EnvironmentError('Database name is missing in evn file.')\n client = pymongo.MongoClient(host='localhost', port=27017,\n appname=__file__)\n db = client[db_name]\n filepath = os.path.basename(keywords_file)\n input_filename, _ = os.path.splitext(filepath)\n collection = db[input_filename]\n\n twitterStreamListener = TwitterStreamListener(collection=collection)\n twitterStream = tweepy.Stream(auth=api.auth,\n listener=twitterStreamListener)\n\n keywords = read_keywords_file(filename=keywords_file)\n logger.info('Streamer App has started listening for keywords: '\n f'{\", \".join(keywords)}')\n twitterStream.filter(track=keywords, is_async=True)\n except requests.exceptions.HTTPError as e:\n logger.error(\"Checking internet connection failed, \"\n f\"status code {e.response.status_code}\")\n except requests.exceptions.ConnectionError:\n logger.error(\"Could not establish a connection.\")\n except (ValueError, TypeError, TweepError, KeyError,\n EnvironmentError) as e:\n logger.error(e)\n except KeyboardInterrupt:\n logger.info('Program interrupted by user. 
')", "def parseSkillDesc():\n\ttry:\n\t\thandle = app.OpenTextFile(app.GetLocalePath() + \"/skilldesc.txt\")\n\t\tcount = app.GetTextFileLineCount(handle)\n\texcept IOError:\n\t\tchat.AppendChat(1, \"Could not load \" + app.GetLocalePath() + \"/skilldesc.txt\")\n\t\treturn\n\t\n\tskill_map = {}\n\n\tfor i in range(count):\n\t\tline = app.GetTextFileLine(handle, i)\n\t\tif str(line).count(\"\\t\") >= 21:\n\t\t\tSkillData = str(line).split(\"\\t\")\n\t\t\tskill_map[int(SkillData[0])] = {\n\t\t\t\t\"class\":str(SkillData[1]).lower(),\n\t\t\t\t\"name\":str(SkillData[2]),\n\t\t\t\t\"icon\":str(SkillData[12]),\n\t\t\t\t}\n\t\n\tapp.CloseTextFile(handle)\n\t\n\treturn skill_map", "def open_input_files(self):\n self.dictionaryFile = open(self.dictionaryFile, 'r', encoding=self.encoding)\n\n if self.annotationFile :\n self.annotationFile = open(self.annotationFile, 'r', encoding=self.encoding)\n elif self.annotationFile is None:\n try:\n self.annotationFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '.ann'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: annotation file is not found.\\n\")\n\n if self.abbreviationsFile :\n self.abbreviationsFile = open(self.abbreviationsFile, 'r', encoding=self.encoding)\n elif self.abbreviationsFile is None:\n try:\n self.abbreviationsFile = open(os.path.join(self.dictionaryPath, self.dictionaryName + '_abrv.dsl'), 'r', encoding=self.encoding)\n except FileNotFoundError:\n if self.verbose >= 2:\n sys.stdout.write (\"Warning: abbreviations file is not found.\\n\")", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.split()\n words, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in words], tags)", "def open_data(self, name):\n self._data = {}\n with open(name, \"r\", encoding=\"utf-8\") as f:\n line_is_name = True\n currentword = \"\"\n self._length = 0\n for line in f:\n if line_is_name:\n currentword = line.strip(\"\\n\")\n self._data[currentword] = Word(currentword)\n self._length += 1\n else:\n self._data[currentword].pos = line.strip(\"\\n\").split(\",\")\n self._data[currentword].update()\n line_is_name = not line_is_name\n self._canOperate = True", "def hyou_reader():\n with open(HYOU_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_match = [\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*〔(\\S*)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*〔(\\S+)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*(\\S+)\")\n ]\n\n voc_key = [\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 3, \"Meaning\": 4},\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 0, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 2, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 0, \"Meaning\": 2},\n ]\n\n match_count = len(voc_match)\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n sound_list = sound_reader(lesson_count)\n elif not voc_line.find(\"----\") != -1 and voc_line != \"\\n\":\n voc_line.strip()\n\n voc_dict = {}\n for i in range(0, match_count):\n voc_group = voc_match[i].match(voc_line)\n if voc_group:\n for key, value in voc_key[i].items():\n if value != 0:\n voc_dict[key] = voc_group.group(value)\n else:\n 
voc_dict[key] = \"\"\n break\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_dict[\"Time\"] = sound_list[voc_count]\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "def test_parsing_action(self):\n self.grammar.parse(open(TEST_ASSETS + '/test_item.txt').read())", "def get_parsed_data():\n\n echonest_data_files = [f for f in os.listdir('.') if re.match(\"^echonest_[\\w]+.txt$\", f)]\n\n # Setting up header with user id and attributes\n header = ['user_id']\n header.extend(ATTRIBUTES)\n\n # printing header to standard out\n print \",\".join(header) \n\n # Processing each file to obtain parsed data\n for data_file in echonest_data_files:\n user_id = data_file[9:-4] # strip file prefix/suffix to get username/id\n parse_echonest_data_file(data_file, user_id)", "def __init__(self, c: Cmdr) -> None:\n # pylint: disable=super-init-not-called\n self.c = c\n self.init_language()\n fn = self.find_user_dict()\n self.d: dict[str, str] = self.open_dict_file(fn)\n g.app.spellDict = self.d", "def __init__(self, inFilename):\n\n self._prmtopVersion=None\n self._flags=[]\n self._raw_format={}\n self._raw_data={}\n self._has_nbfix_terms = False\n\n with open(inFilename, 'r') as fIn:\n for line in fIn:\n if line[0] == '%':\n if line.startswith('%VERSION'):\n tag, self._prmtopVersion = line.rstrip().split(None, 1)\n elif line.startswith('%FLAG'):\n tag, flag = line.rstrip().split(None, 1)\n self._flags.append(flag)\n self._raw_data[flag] = []\n elif line.startswith('%FORMAT'):\n format = line.rstrip()\n index0=format.index('(')\n index1=format.index(')')\n format = format[index0+1:index1]\n try:\n m = FORMAT_RE_PATTERN.search(format)\n self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), int(m.group(3)), m.group(4))\n except:\n # We couldn't parse the format, so just treat the whole line as a single string.\n self._raw_format[self._flags[-1]] = (format, 1, 'a', 80, '')\n elif line.startswith('%COMMENT'):\n continue\n elif self._flags \\\n and 'TITLE'==self._flags[-1] \\\n and not self._raw_data['TITLE']:\n self._raw_data['TITLE'] = line.rstrip()\n else:\n flag=self._flags[-1]\n (format, numItems, itemType,\n iLength, itemPrecision) = self._getFormat(flag)\n line = line.rstrip()\n for index in range(0, len(line), iLength):\n item = line[index:index+iLength]\n if item:\n self._raw_data[flag].append(item.strip())\n # See if this is a CHAMBER-style topology file, which is not supported\n # for creating Systems\n self.chamber = 'CTITLE' in self._flags", "def __init__(self):\n # Initialise the base class\n fragment.__init__(self)\n # Initialise program-specific flags and\n # attributes\n self.__isccp4 = False\n self.__termination = False\n # List of keyword lines\n self.__keywords = []\n # Dictionary of logical name/filename pairs\n self.__logicalnames = {}", "def __init__(self, txt_path, in_vocab_path, out_vocab_path, sort=True):\n self.txt_seqs = open(txt_path, encoding='utf8', errors='ignore').readlines()\n self.word2id = utils.load_vocab(in_vocab_path,\n extra_word_list=[\"<UNK>\", \"<END>\"])\n self.punc2id = utils.load_vocab(out_vocab_path,\n extra_word_list=[\" \"])\n if sort:\n # Also need to sort in collate_fn cause the sentence length will\n # change after self.preprocess()\n self.txt_seqs.sort(key=lambda x: len(x.split()), reverse=True)", "def main():\n\n start_program()\n yes_syn_words, no_syn_words, stop_words, record, mp3_filename, text, device_index, 
output_file = \\\n process_parameter_set()\n stand_alone_flag = process_check_input_argument()\n process_speak_listen(device_index, mp3_filename, text, record, flag=1)\n text = process_name(device_index, mp3_filename, record)\n input_details = process_speak_listen(device_index, mp3_filename, text, record, flag=0)\n response = process_input_details(device_index, input_details, mp3_filename, record, yes_syn_words, no_syn_words,\n stop_words)\n process_output_file_write(output_file, response)\n process_delete_mp3_output_files(stand_alone_flag)\n exit_program()", "def __editUserPEL(self):\n from QScintilla.SpellChecker import SpellChecker\n pel = SpellChecker.getUserDictionaryPath(True)\n self.__editSpellingDictionary(pel)", "def parse(self, file):\n IniFile.parse(self, file, [\"Desktop Entry\", \"KDE Desktop Entry\"])", "def parse(self, password):\r\n\r\n # Since keyboard combos can look like many other parsings, filter them\r\n # out first\r\n\r\n section_list, found_walks, keyboard_list = detect_keyboard_walk(password)\r\n\r\n self._update_counter_len_indexed(self.count_keyboard, found_walks)\r\n\r\n # Identify e-mail and web sites before doing other string parsing\r\n # this is because they can have digits + special characters\r\n\r\n found_emails, found_providers = email_detection(section_list)\r\n\r\n for email in found_emails:\r\n self.count_emails[email] += 1\r\n for provider in found_providers:\r\n self.count_email_providers[provider] += 1\r\n\r\n found_urls, found_hosts, found_prefixes = website_detection(section_list)\r\n\r\n for url in found_urls:\r\n self.count_website_urls[url] += 1\r\n for host in found_hosts:\r\n self.count_website_hosts[host] += 1\r\n for prefix in found_prefixes:\r\n self.count_website_prefixes[prefix] += 1\r\n\r\n # Identify years in the dataset. 
This is done before other parsing\r\n # because parsing after this may classify years as another type\r\n\r\n found_years = year_detection(section_list)\r\n\r\n for year in found_years:\r\n self.count_years[year] += 1\r\n\r\n # Need to classify context sensitive replacements before doing the\r\n # straight type classifications, (alpha, digit, etc), but want to doing\r\n # it after other types of classifations.\r\n\r\n found_context_sensitive_strings = context_sensitive_detection(section_list)\r\n\r\n for cs_string in found_context_sensitive_strings:\r\n self.count_context_sensitive[cs_string] += 1\r\n\r\n # Identify pure alpha strings in the dataset\r\n\r\n found_alpha_strings, found_mask_list = alpha_detection(\r\n section_list,\r\n self.multiword_detector\r\n )\r\n\r\n self._update_counter_len_indexed(self.count_alpha, found_alpha_strings)\r\n self._update_counter_len_indexed(self.count_alpha_masks, found_mask_list)\r\n\r\n # Identify pure digit strings in the dataset\r\n\r\n found_digit_strings = digit_detection(section_list)\r\n\r\n self._update_counter_len_indexed(self.count_digits, found_digit_strings)\r\n\r\n # Categorize everything else as other\r\n\r\n found_other_strings = other_detection(section_list)\r\n\r\n self._update_counter_len_indexed(self.count_other, found_other_strings)\r\n\r\n # Calculate the counts of the individual sections for PRINCE dictionary\r\n # creation\r\n\r\n prince_evaluation(self.count_prince, section_list)\r\n\r\n # Now after all the other parsing is done, create the base structures\r\n\r\n is_supported, base_structure = base_structure_creation(section_list)\r\n\r\n if is_supported:\r\n self.count_base_structures[base_structure] += 1\r\n\r\n self.count_raw_base_structures[base_structure] += 1\r\n\r\n return True", "def read_uc(input, tax_separator, tax_sense):\n with open(input, 'r') as uc_file:\n dict_taxs = {}\n for line in uc_file:\n line = line.strip().split()\n is_hit = line[0] == 'H' # check if line is for a hit or for a no hit ('H' vs 'N', respectively)\n if is_hit:\n read_id = line[8] # take read id, located in 9th column of the file\n if tax_sense == 'asc':\n taxonomy = line[9].split(tax_separator) # take taxonomy column and split it\n elif tax_sense == 'desc':\n taxonomy = line[9].split(tax_separator)[::-1] # take taxonomy and reverse order\n try:\n dict_taxs[read_id]['hits'] += 1 # sum hits for each sequence\n dict_taxs[read_id]['taxonomy'].append(taxonomy) # add taxonomy to taxonomy dict\n except KeyError: # fires when a read_id is read for the first time\n percentage_identity = line[3] # take percentage_identity to the database\n cigar_alignment = line[7]\n dict_taxs[read_id] = {'hits': 1,\n 'taxonomy': [taxonomy],\n 'perc_id': percentage_identity,\n 'alignment': cigar_alignment}\n return dict_taxs", "def text_by_paragraph(self,\r\n filename,\r\n splitchar=EOL,\r\n keys=True,\r\n key_definitions=False,\r\n query=True):\r\n\r\n\r\n analysetext = file_access.get_text_file(filename)\r\n #load the text to be analysed\r\n\r\n if keys:\r\n\r\n possible_keys = set()\r\n if len(self.keys())>50:\r\n nprint (\"TOO MANY KEYS\")\r\n for key in self.keys():\r\n #grab all keys, removing tags.\r\n #DESIDERATUM: Make it possible to\r\n #restrict the range of notes\r\n #from which the keys are grabbed\r\n\r\n if SLASH in key:\r\n if key.split(SLASH)[0] != EMPTYCHAR:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n else:\r\n possible_keys.add(key.split(SLASH)[0].lower())\r\n\r\n\r\n possible_keys = list(possible_keys)\r\n\r\n possible_keys = 
show_list(possible_keys,\r\n from_here=0,\r\n to_here=len(possible_keys),\r\n label='KEYS',\r\n select=True,\r\n display=display)\r\n # show the keys through display\r\n #object and select which are to be kept\r\n possible_keys += input(queries.ADDITIONAL_KEYS).split(COMMA)\r\n display.noteprint((labels.KEYS,\r\n formkeys(possible_keys)))\r\n\r\n\r\n for paragraph in analysetext.split(splitchar):\r\n # iterate over segments of the text to be analysed\r\n found_words = set()\r\n keyset = set()\r\n\r\n if keys:\r\n found_words.update({a_temp for a_temp in get_words(paragraph)\r\n if len(a_temp) > 3}.intersection(set(possible_keys)))\r\n # make a set of all the words that have been found\r\n keyset = found_words\r\n if key_definitions:\r\n found_words.update(self.default_dict['definitions']\r\n .return_keys(get_words(paragraph)))\r\n keyset = found_words\r\n\r\n display.noteprint((formkeys(keyset),\r\n nformat.encase(paragraph,\r\n found_words,\r\n surround=False)))\r\n # display the segment as a note\r\n #with found words encased\r\n #in arrow brackets\r\n\r\n if not query:\r\n if keyset == set():\r\n keyset = {VOIDTERM}\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=keyset,\r\n et=paragraph)\r\n\r\n else:\r\n\r\n if input(queries.INCLUDE) in YESTERMS+[EMPTYCHAR]:\r\n # ask if the found words\r\n #should be included as keys\r\n\r\n newkeys = set(input(formkeys(keyset)\r\n +queries.KEYWORDS_TO_ADD).split(COMMA)).union(keyset)\r\n if paragraph.strip() != EMPTYCHAR:\r\n self.enter(ek=newkeys, et=paragraph)\r\n if input(queries.CONTINUE + BLANK) not in YESTERMS+[EMPTYCHAR]:\r\n break", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()", "def __init__(self, utkface_filenames = 'utkface_images.txt'):\n self.metadata = self.load_metadata(utkface_filenames)\n self.prefix = ''\n return", "async def parse(self, raw: str) -> dict:" ]
[ "0.58220077", "0.5802064", "0.57448256", "0.54425025", "0.5314302", "0.52784765", "0.5223711", "0.51647186", "0.51590455", "0.5118437", "0.51034755", "0.5095762", "0.5017907", "0.50099504", "0.49973136", "0.49962452", "0.49901965", "0.4973686", "0.49708888", "0.4963419", "0.49502033", "0.4945873", "0.49448076", "0.4931639", "0.4928282", "0.4927819", "0.49247798", "0.4911911", "0.49064827", "0.49051127", "0.49008268", "0.48830375", "0.4882647", "0.48741752", "0.48334667", "0.48224965", "0.48175418", "0.48048386", "0.4804074", "0.47922102", "0.47805208", "0.4773002", "0.47710803", "0.47704324", "0.47635543", "0.47566476", "0.47469962", "0.4737657", "0.47297361", "0.47207543", "0.47200012", "0.4702735", "0.4693539", "0.46907616", "0.46891323", "0.46826333", "0.46786705", "0.46722594", "0.4668286", "0.46580383", "0.46541458", "0.46417364", "0.46304756", "0.4617858", "0.4617086", "0.4615558", "0.46092713", "0.46062034", "0.4599113", "0.45985088", "0.4595277", "0.45840377", "0.45760122", "0.45709732", "0.45657733", "0.45647377", "0.45618868", "0.45600173", "0.4557529", "0.45432606", "0.45429438", "0.4537278", "0.45359063", "0.4530652", "0.45270395", "0.45256546", "0.45137468", "0.4512048", "0.45090872", "0.450235", "0.44955835", "0.44952524", "0.44911984", "0.44901884", "0.44858018", "0.44843408", "0.44764587", "0.44764587", "0.44731146", "0.44665638" ]
0.4876919
33
Input a totals (cumulative) data frame, then output a daily data frame.
def get_increased_data(input_data_frame):
    desc_data = input_data_frame.iloc[:, ::-1]
    for index, col in desc_data.iteritems():
        desc_data[index] = desc_data[index] - desc_data[index - 1]
        if index == 2:
            asc_data = desc_data.iloc[:, ::-1]
            return asc_data
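A minimal alternative sketch of the same cumulative-to-daily conversion, assuming the columns are consecutive day keys in ascending order. It uses pandas' built-in column-wise diff instead of the manual reversed-column subtraction above; the function name, column labels, and sample values are illustrative assumptions, not taken from the entry itself.

import pandas as pd

def cumulative_to_daily(totals_df: pd.DataFrame) -> pd.DataFrame:
    # Difference each column against the previous (left) column,
    # turning running totals into per-day increments.
    daily_df = totals_df.diff(axis=1)
    # The first column has no previous day, so keep its original total.
    daily_df.iloc[:, 0] = totals_df.iloc[:, 0]
    return daily_df

# Example usage with hypothetical day-numbered columns 1..3:
totals = pd.DataFrame({1: [10, 5], 2: [15, 7], 3: [21, 12]})
print(cumulative_to_daily(totals))

Using diff(axis=1) keeps the ascending column order throughout, which avoids the double reversal in the original snippet.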
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_daily_files(dataframe, path, filename):\n\n days = dataframe.groupby('date_time_day')\n dataframe.groupby('date_time_day').size().reset_index(name='data points per day')\n\n for day in days.groups:\n print(day.date())\n output_path = path + filename + \"_\" + str(day.date()) + '.csv'\n print(\"Creating intermediate flagged data file: \", output_path)\n days.get_group(day).to_csv(output_path, index=False)", "def daily(self):\n african_cases = self.transform()\n df = african_cases.copy()\n df_latest = df.iloc[0].reset_index()\n df_latest.columns = df_latest.iloc[0]\n df_latest.drop([0],axis=0,inplace=True)\n df_latest.rename(columns={'Date':'Country'},inplace=True)\n daily_case = df_latest.copy()\n return daily_case", "def read_and_prepare_dataframe(start_date='1980-01-01'):\n \n # Read the dataset and rename 'dt' to 'Date'\n df = pd.read_csv('Data/GlobalLandTemperaturesByCountry.csv', parse_dates=['dt'])\n df.rename(columns={'dt':'Date'}, inplace=True)\n \n # Filter for Canada\n df = df[df['Country']=='Canada']\n \n # Filter out data prior to start date\n df = df[df['Date'] >= start_date]\n \n # To ensure data is sorted\n df = df.sort_values('Date')\n \n # Set index to Date and return the final dataframe\n return df.set_index('Date')", "def to_stock_dataframe_day(self, date):\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n class_data = [i for i in dir(self) if not callable(getattr(self, i)) and\n not i.startswith(\"__\") and type(getattr(self, i)) is pd.DataFrame]\n df = pd.DataFrame()\n for i in class_data:\n df = join_features(df, getattr(self, i), fill_method=FillMethod.FUTURE_KROGH)\n return df.ix[date, :]", "def process(self, inputs):\n df = cudf.read_csv(self.conf['path'])\n # extract the year, month, day\n ymd = df['DTE'].astype('str').str.extract(r'(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)')\n # construct the standard datetime str\n df['DTE'] = ymd[0].str.cat(ymd[1],\n '-').str.cat(ymd[2],\n '-').astype('datetime64[ms]')\n df = df[['DTE', 'OPEN', 'CLOSE', 'HIGH', 'LOW', 'SM_ID', 'VOLUME']]\n df['VOLUME'] /= 1000\n # change the names\n df.columns = ['datetime', 'open', 'close',\n 'high', 'low', \"asset\", 'volume']\n return df", "def get_data(day, unixstart=None, unixend=None):\n global subnet\n df_data = pd.DataFrame([])\n while len(df_data.index)<=0:\n try:\n df_data = pd.read_feather(f'/home/pi/studies/ardmore/homeserver/h{subnet}_{day}.feather')\n except Exception as e:\n # print(f\"Error grid_server.get_data:{e}\")\n pass\n if unixstart!=None:\n df_data = df_data[(df_data['unixtime']>=unixstart)&(df_data['unixtime']<=unixend)]\n float_cols = [x for x in df_data.columns if not x.startswith('timezone')]\n df_data = df_data[float_cols].astype(float)\n return df_data", "def date_single(\n input_df: pd.DataFrame, col_name: str, cur_time: dt.datetime = dt.datetime.now()\n):\n df = input_df[[col_name]].copy()\n df[col_name] = pd.to_datetime(df[col_name])\n df[col_name + \"_age\"] = cur_time.year - df[col_name].dt.year\n df[col_name + \"_year\"] = df[col_name].dt.year\n df[col_name + \"_month\"] = df[col_name].dt.month\n df[col_name + \"_day\"] = df[col_name].dt.day\n df[col_name + \"_hour\"] = df[col_name].dt.hour\n df[col_name + \"_minute\"] = df[col_name].dt.minute\n df[col_name + \"_second\"] = df[col_name].dt.second\n df[col_name + \"_day_of_week\"] = df[col_name].dt.dayofweek\n df[col_name + \"_day_of_year\"] = df[col_name].dt.dayofyear\n df[col_name + \"_week_of_year\"] = 
df[col_name].dt.weekofyear\n df[col_name + \"_is_weekend\"] = (df[col_name + \"_day_of_week\"] == SATURDAY) | (\n df[col_name + \"_day_of_week\"] == SUNDAY\n )\n df[col_name + \"_year_elapsed\"] = (cur_time - df[col_name]).dt.days / DAYS_IN_YEAR\n df[col_name + \"_month_elapsed\"] = (cur_time - df[col_name]).dt.days / DAYS_IN_MONTH\n df[col_name + \"_day_elapsed\"] = (cur_time - df[col_name]).dt.days\n df[col_name + \"_month_sin\"] = _get_cyclical_sin(\n df, col_name, \"month\", MONTH_IN_YEAR\n )\n df[col_name + \"_month_cos\"] = _get_cyclical_cos(\n df, col_name, \"month\", MONTH_IN_YEAR\n )\n df[col_name + \"_day_sin\"] = _get_cyclical_sin(\n df, col_name, \"day\", df[col_name + \"_max_day\"]\n )\n df[col_name + \"_day_cos\"] = _get_cyclical_cos(\n df, col_name, \"day\", df[col_name + \"_max_day\"]\n )\n df[col_name + \"_hour_sin\"] = _get_cyclical_sin(df, col_name, \"hour\", HOUR_IN_DAY)\n df[col_name + \"_hour_cos\"] = _get_cyclical_cos(df, col_name, \"hour\", HOUR_IN_DAY)\n df[col_name + \"_minute_sin\"] = _get_cyclical_sin(\n df, col_name, \"minute\", MINUTE_IN_HOUR\n )\n df[col_name + \"_minute_cos\"] = _get_cyclical_cos(\n df, col_name, \"minute\", MINUTE_IN_HOUR\n )\n df[col_name + \"_second_sin\"] = _get_cyclical_sin(\n df, col_name, \"second\", SECOND_IN_MINUTE\n )\n df[col_name + \"_second_cos\"] = _get_cyclical_cos(\n df, col_name, \"second\", SECOND_IN_MINUTE\n )\n df[col_name + \"_is_year_start\"] = df[col_name].dt.is_year_start\n df[col_name + \"_is_year_end\"] = df[col_name].dt.is_year_end\n df[col_name + \"_is_quarter_start\"] = df[col_name].dt.is_quarter_start\n df[col_name + \"_is_quarter_end\"] = df[col_name].dt.is_quarter_end\n df[col_name + \"_is_month_start\"] = df[col_name].dt.is_month_start\n df[col_name + \"_is_month_end\"] = df[col_name].dt.is_month_end\n df[col_name + \"_is_business_hour\"] = (df[col_name + \"_hour\"] > BUSINESS_OPEN) & (\n df[col_name + \"_hour\"] < BUSINESS_CLOSE\n )\n df[col_name + \"_period\"] = pd.cut(\n df[col_name + \"_hour\"],\n bins=[MIDNIGHT_START, MORNING_START, AFTERNOON_START, NIGHT_START, NIGHT_END],\n labels=[\"dawn\", \"morning\", \"afternoon\", \"night\"],\n )\n return df.remove(columns=col_name)", "def get_data_extended(self, inception_date, interval):\n instrument = self.instrumentLookup()\n from_date = dt.datetime.strptime(inception_date, \"%Y-%m-%d\")\n to_date = dt.date.today()\n data = pd.DataFrame(columns=[\"date\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n while True:\n if from_date.date() >= (dt.date.today() - dt.timedelta(100)):\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, dt.date.today(), interval\n )\n ),\n ignore_index=True,\n )\n break\n else:\n to_date = from_date + dt.timedelta(100)\n data = data.append(\n pd.DataFrame(\n self.kite.historical_data(\n instrument, from_date, to_date, interval\n )\n ),\n ignore_index=True,\n )\n from_date = to_date\n data.set_index(\"date\", inplace=True)\n self.data_df = data", "def transform(self, y=None):\n\n df = self.X.copy()\n num_days = (\n int(\n np.timedelta64((max(df[\"date\"]) - min(df[\"date\"])), \"D\")\n / np.timedelta64(1, \"D\")\n )\n + 1\n )\n start = pd.to_datetime(min(df[\"date\"]))\n dates = [(start + np.timedelta64(i, \"D\")) for i in range(num_days)]\n\n seq = pd.DataFrame({\"dt_time\": dates, \"day_seq\": np.arange(num_days)})\n seq[\"date\"] = seq[\"dt_time\"].dt.date\n\n df1 = df.join(seq.set_index(\"date\"), on=\"date\")\n\n df1[\"year\"] = df1[\"dt_time\"].dt.year\n df1[\"month\"] = 
df1[\"dt_time\"].dt.month\n df1[\"day\"] = df1[\"dt_time\"].dt.day\n df1[\"day_of_week\"] = df1[\"dt_time\"].dt.weekday\n df1[\"month_day\"] = df1[\"dt_time\"].dt.strftime(\"%m/%d\")\n df1[\"month_weekday\"] = df1[\"dt_time\"].dt.strftime(\"%b_%a\")\n df1[\"month\"] = df1[\"dt_time\"].dt.strftime(\"%m/%d\")\n return df1", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def get_dataframe(start_date=INITIAL_DATE, end_date=None) -> pd.DataFrame:\n\n end_date = (\n end_date\n if end_date is not None\n else dt.datetime.utcnow() - dt.timedelta(days=1)\n ).date()\n\n dates = pd.date_range(start_date, end_date)\n\n with futures.ThreadPoolExecutor() as ex:\n\n df = pd.concat(ex.map(get_dataframe_for_date, dates))\n\n df.rename(columns=str.lower, inplace=True)\n\n df.drop(columns=[c for c in df.columns if \"/\" in c], inplace=True)\n\n df[\"datetime\"] = pd.to_datetime(df[\"last_update\"])\n\n df[\"date\"] = df.datetime.map(lambda d: d.date())\n\n # df[\"county\"] = df.admin2\n renames = {\n \"country_region\": \"country\",\n \"province_state\": \"state\",\n \"admin2\": \"county\",\n }\n\n df.rename(columns=renames, inplace=True)\n\n df.drop(\n columns=[\"last update\", \"last_update\", \"lat\", \"long_\", \"combined_key\"],\n inplace=True,\n )\n\n return df", "def time_series_daily_adj(symbol: str, outputsize: str ='compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY_ADJUSTED', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata", "def time_series_daily(symbol: str, outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata", "def date_double(input_df: pd.DataFrame, begin_col: str, end_col: str):\n df = input_df[[begin_col, end_col]].copy()\n df[begin_col] = pd.to_datetime(df[begin_col])\n df[end_col] = pd.to_datetime(df[end_col])\n df[\"{}_{}_year\".format(begin_col, end_col)] = (\n df[end_col] - 
df[begin_col]\n ).dt.days / DAYS_IN_YEAR\n df[\"{}_{}_month\".format(begin_col, end_col)] = (\n df[end_col] - df[begin_col]\n ).dt.days / DAYS_IN_MONTH\n df[\"{}_{}_days\".format(begin_col, end_col)] = (df[end_col] - df[begin_col]).dt.days\n df[\"{}_{}_hour\".format(begin_col, end_col)] = (\n df[end_col] - df[begin_col]\n ).dt.seconds / SECOND_IN_HOUR\n df[\"{}_{}_minute\".format(begin_col, end_col)] = (\n df[end_col] - df[begin_col]\n ).dt.seconds / SECOND_IN_MINUTE\n df[\"{}_{}_second\".format(begin_col, end_col)] = (\n df[end_col] - df[begin_col]\n ).dt.seconds\n return df.drop(columns=[begin_col, end_col])", "def load_daily_data():\n return pd.read_csv(os.path.join('data', 'raw', 'full_grouped.csv'))", "def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df", "def transform(self, y=None):\n num_days = (\n int(\n np.timedelta64(\n pd.to_datetime(self.end_date) - pd.to_datetime(self.start_date), \"D\"\n )\n / np.timedelta64(1, \"D\")\n )\n + 1\n )\n dates = [\n (pd.to_datetime(self.start_date) + np.timedelta64(i, \"D\"))\n for i in range(num_days)\n ]\n start_seq = int(\n (\n np.timedelta64(\n pd.to_datetime(self.start_date) - pd.to_datetime(self.model_end[0]),\n \"D\",\n )\n + self.model_end[1]\n )\n / np.timedelta64(1, \"D\")\n )\n df = pd.DataFrame(\n {\"dt_time\": dates, \"day_seq\": np.arange(start_seq, start_seq + num_days)}\n )\n df[\"date\"] = df[\"dt_time\"].dt.date\n df[\"year\"] = df[\"dt_time\"].dt.year\n df[\"month\"] = 
df[\"dt_time\"].dt.month\n df[\"day\"] = df[\"dt_time\"].dt.day\n df[\"day_of_week\"] = df[\"dt_time\"].dt.weekday\n df[\"month_day\"] = df[\"dt_time\"].dt.strftime(\"%m/%d\")\n df[\"month_weekday\"] = df[\"dt_time\"].dt.strftime(\"%b_%a\")\n return df", "def create_data_table(df: pd.DataFrame) -> pd.DataFrame:\n\n df = df.copy()\n\n # Normalize times by labeling all of today's data with its future label, 00:00\n # tomorrow (as that's the timestamp marking the end of the 24-hour data collection\n # period). No need to adjust data not from today; it's already been adjusted and is\n # labeled with the date whose 00:00 marked the end of data collection (i.e., data\n # generated on Mar 20 is labeled Mar 21).\n normalized_dates = df[Columns.DATE].dt.normalize()\n is_at_midnight = df[Columns.DATE] == normalized_dates\n df.loc[~is_at_midnight, Columns.DATE] = normalized_dates[\n ~is_at_midnight\n ] + pd.Timedelta(days=1)\n df[Columns.DATE] = df[Columns.DATE].dt.strftime(r\"%Y-%m-%d\")\n\n df = df.drop(\n columns=[\n Columns.IS_STATE,\n Columns.LOCATION_NAME,\n Columns.OUTBREAK_START_DATE_COL,\n Columns.DAYS_SINCE_OUTBREAK,\n Columns.POPULATION,\n Columns.STAGE,\n Columns.COUNT_TYPE,\n ]\n )\n\n df = (\n df.pivot_table(\n index=[\n c\n for c in df.columns\n if c not in [Columns.CASE_TYPE, Columns.CASE_COUNT]\n ],\n columns=Columns.CASE_TYPE,\n values=Columns.CASE_COUNT,\n aggfunc=\"first\",\n )\n .reset_index()\n .sort_values([Columns.COUNTRY, Columns.STATE, Columns.DATE])\n )\n\n for col in CaseInfo.get_info_items_for(\n InfoField.CASE_TYPE, count=Counting.TOTAL_CASES\n ):\n df[col] = pd.to_numeric(df[col], downcast=\"integer\")\n\n # save_path = Paths.DATA / \"data_table.csv\"\n # df.to_csv(save_path, index=False)\n # print(f\"Saved data to {save_path.relative_to(Paths.ROOT)}\")\n\n return df", "def process_data(dataframe):\n\n # add a new column for just the date in date format\n dataframe = createDateColumn(dataframe)\n # get the frequency of each date\n frequency = dataframe['Date Merged'].value_counts()\n # converting to df and assigning new names to the columns\n df_value_counts = pd.DataFrame(frequency)\n df_value_counts = df_value_counts.reset_index()\n # change column names\n df_value_counts.columns = ['dates', 'counts']\n # delete the the row with None\n dateFreq = df_value_counts.loc[df_value_counts[\"dates\"] != \"None\"]\n\n # 1. Create a graph for number of PRs merged over time\n numPRMerged_graph(dateFreq)\n # 2. Create a graph for avg PR merge time\n avgMergetime(dataframe)\n # 3. 
A table with PR info for each month\n getMonthlyPRinfo(dataframe)", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if \"SPY\" not in symbols:\n symbols.insert(0, \"SPY\")\n for symbol in symbols:\n temp = pd.read_csv(symbol_to_path(symbol, base_dir=\"data\"), \n index_col=\"Date\", \n parse_dates=True, \n usecols=[\"Date\", \"Adj Close\"])\n \n temp = temp.rename(columns={\"Adj Close\": symbol})\n \n df = df.join(temp, how=\"inner\")\n df = df.sort_index(axis=0, ascending=[1])\n \n return df", "def __get_latest_data(table_name='derivatives_economicindicatorstandard'):\n # create query and get data\n query = 'SELECT * FROM ' + table_name\n df = AccessDB().run_read_query(query)\n\n if table_name == 'derivatives_economicindicatorstandard':\n df = pd.DataFrame(df.groupby(['dbcode', 'indicator', 'country', 'freq', 'flow'])['date'].max())\n else:\n df = pd.DataFrame(df.groupby(['dbcode', 'indicator', 'country', 'freq', 'counter_party'])['date'].max())\n df.reset_index(inplace=True)\n return df", "def create_date_features(df = None, date = None):\n #TODO", "def clean_meteo_data(self, df):\n for col in df.columns:\n df[col] = df[col].str.replace(',', '.').astype(\"float\")\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n df=df.fillna(method='ffill')\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n# print(\"shape selected sensor data:\",df.shape)\n df=df.dropna()\n df=df.resample(\"10T\").mean()\n df=df.reset_index()\n df['dag']=df['datetime'].dt.day\n return df", "def get_price_df(url):\n df = pd.read_csv(url).dropna()\n df.index = pd.to_datetime(df['Date'])\n df = df.drop(columns=['Date'])\n return df", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! 
{self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def working_data(df, date_of_interest, lower_window, upper_window):\n\n # Actual dates we are interested in\n lower_date = date_of_interest - timedelta(days=lower_window)\n upper_date = date_of_interest + timedelta(days=upper_window)\n\n # Specs want us to call more than that\n lower_date_extreme = date_of_interest - timedelta(days=(2 * lower_window + 1))\n upper_date_extreme = date_of_interest + timedelta(days=(2 * upper_window))\n\n # Tighten to the range we want (and show non-trading days too)\n df = df.reindex(pd.date_range(lower_date_extreme, upper_date_extreme, freq='D'))\n df = df.rename(columns={'volume': 'Volume'})\n df['Volume'] = df['Volume'].fillna(0)\n df['close'] = df['close'].fillna(method='ffill')\n\n # Tag with relative dates\n df = df.apply(tag_relative_date, axis=1, args=(date_of_interest, lower_date, upper_date))\n\n # Calculate the data we want\n df['Return'] = df['close'].diff()\n df['Return_pct'] = df['close'].pct_change()\n df['Daily_Spread'] = df['high'] - df['low']\n df['Daily_Spread'] = df['Daily_Spread'].fillna(0)\n\n return df", "def get_data(symbols, dates):\n \n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0,'SPY')\n for symbol in symbols:\n \n df1 = pd.read_csv(symbol_to_path(symbol),usecols=['Date','Adj Close'],\n index_col='Date',na_values =['nan'] )\n df1 = df1.rename(columns ={\"Adj Close\": symbol}) \n #print df1\n df = df.join(df1,how='inner')\n \n return df.sort_index()", "def timeseries_dataframe(self):\n return", "def create_data_structure(dataframe):\n\n dataframe = dataframe.groupby(\"name\").agg({\"date_new\": \"min\",\n \"url\": \"count\"})\n dataframe.columns = ['recency', 'frequency']\n dataframe[\"recency\"] = dataframe[\"recency\"].astype(\"int64\")\n\n return dataframe", "def get_dummy_data(num_days, low, high, end_date='1970-01-01'):\n step = (high - low) / (num_days - 1)\n ref = datetime.strptime(end_date, '%Y-%m-%d').date()\n start_dt = ref - timedelta(days=(num_days - 1))\n end_dt = ref + timedelta(days=1)\n ts = np.arange(start_dt, end_dt, timedelta(days=1)).astype(date)\n df = pd.DataFrame(data={'price': np.arange(low, high + 1, step)}, index=pd.DatetimeIndex(ts))\n df.index.name = 'date'\n return df", "def load_data(city_input, month_input, day_input):\n # Read csv for city_input using CITY_DATA dictionary to create df\n df = pd.read_csv(CITY_DATA[city_input])\n\n # Convert 'Start Time' and 'End Time' columns in df to datetime with pd.to_datetime function\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # Include month number in df using dt.month\n df['Start Month'] = df['Start Time'].dt.month\n\n # Include weekday in df using dt.weekday_name - note its format, e.g. 
Monday\n df['Start Day'] = df['Start Time'].dt.weekday_name\n\n # Include hour in df using dt.hour\n df['Start Hour'] = df['Start Time'].dt.hour\n\n ## Month\n if month_input != 'all':\n # Create a list of months based on months indices using .index(element)\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n # Python uses 0 indexing so we need to increase the values by 1 to correspond with month numbers\n month = MONTHS.index(month_input) + 1\n # Filter by month to create the new dataframe\n df = df[df['Start Month'] == month] # where month is the indexed version of the user input\n\n ## Day\n # Reformat day_input to Friday, for example\n day = day_input.title()\n\n if day != 'All':\n # Create a list of days\n DAYS = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday', 'All']\n # Filter by day of week to create the new dataframe\n if day != 'All':\n df = df[df['Start Day'] == day]\n\n # Replace 'Trip Duration' with calculated version\n # This felt simpler than converting the number of seconds into days, hours, minutes, seconds ;)\n df['Trip Duration'] = df['End Time'] - df['Start Time']\n\n # print(df.head(20))\n return df", "def url_to_df(url, station_name, dictionary):\n \n path_to_data = os.path.join(et.data.get_data(url=url))\n \n dataframe = pd.read_csv(path_to_data)\n \n dataframe['Station ID'] = station_name\n \n output_dataframe = dataframe[['Station ID', 'year', 'month', 'day', 'doy',\n 'sm_5cm', 'sm_10cm', 'sm_20cm', 'sm_50cm', 'sm_100cm']]\n \n output_dataframe['month'].replace({1: \"Jan\", 2: \"Feb\", 3: \"Mar\", \n 4: \"Apr\", 5: \"May\", 6: \"Jun\", \n 7: \"Jul\", 8: \"Aug\", 9: \"Sep\", \n 10: \"Oct\", 11: \"Nov\", 12: \"Dec\"}, \n inplace=True)\n \n cut_labels = ['decad0', 'decad1', 'decad2']\n cut_bins = [0, 10, 20, 31]\n output_dataframe['decad'] = pd.cut(output_dataframe['day'], bins=cut_bins, labels=cut_labels)\n \n cut_labels = ['pentad0', 'pentad1', 'pentad2', 'pentad3', 'pentad4', 'pentad5']\n cut_bins = [0, 5, 10, 15, 20, 25, 31]\n output_dataframe['pentad'] = pd.cut(output_dataframe['day'], bins=cut_bins, labels=cut_labels)\n \n dictionary.update({station_name: output_dataframe})", "def create_df():\n df = load_df_from_files()\n df = clean_df(df)\n df = expand_df_dates(df)\n df[\"age_at_t\"] = ((df[\"date\"] - df[\"birthday\"]) / 365).dt.days # Yeah, this is weird.\n return df", "def daily_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n yesterday = today - timedelta(days=1)\n todays_per_min = []\n yesterday_per_min = []\n today_viewed = []\n yesterday_viewed = []\n # this iterates over each row in the dataframe, applying the logic and adding the cards_per_min value to the\n # appropriate list\n for index, row in df.iterrows():\n if row['session_start'].date() == today:\n per_min = get_cards_per_min(row)\n todays_per_min.append(per_min)\n today_viewed.append(row['total_looked_at'])\n if row['session_start'].date() == yesterday:\n per_min = get_cards_per_min(row)\n yesterday_per_min.append(per_min)\n yesterday_viewed.append(row['total_looked_at'])\n today_viewed_result = total_viewed(today_viewed, yesterday_viewed)\n today_viewed_result['total_viewed_daily'] = today_viewed_result.pop('total_viewed')\n return today_viewed_result", "def generate_daily_matrix(full_df, feat_days):\n pred_ticker = full_df.ticker.unique()[0]\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n 
dfml.rename({'percent_change_pred': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_pred']]\n help_df.rename({'percent_change_pred': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml.drop('percent_change_feat', axis=1)", "def sql_return_df(query, params, date_cols):\n conn = sqlite3.connect(db_filepath)\n df = pd.read_sql(query, conn, params=params, parse_dates=date_cols)\n conn.close()\n return df", "def run_episode(self, environment):\n dummy = pd.DataFrame({\"Date\": [\"2016-01-01\"]})\n dummy = dummy.set_index(\"Date\")\n return dummy", "def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n#changed 'weekday_name' to just 'weekday' which outputs the weekday as integer\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n# problem with the 'day'-filter, if a day (not 'all') is applied, the output is not right\n # filter by day of week if applicable\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n day = days.index(day) + 1\n df = df[df['dow'] == day]\n\n return df", "def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())", "def dst(df):\n pass", "def transform(self, incoming_df, **transform_params):\n outgoing_df = incoming_df.copy()\n outgoing_df[self.new_feature_name] = outgoing_df[self.feature].apply(self.get_time_of_day)\n return outgoing_df", "def get_dataframe_for_date(date: dt.date = INITIAL_DATE) -> pd.DataFrame:\n\n url = (\n \"https://raw.githubusercontent.com/\"\n \"CSSEGISandData/COVID-19/master/\"\n \"csse_covid_19_data/\"\n \"csse_covid_19_daily_reports/\"\n \"{date}.csv\"\n ).format(date=date.strftime(\"%m-%d-%Y\"))\n\n resp = requests.get(url)\n\n with io.StringIO(resp.text) as fp:\n return pd.read_csv(fp)", "def create_date_feature_daytime(df = None, date = None):\n df[date] = pd.to_datetime(df[date])\n df['dayOfWeek'] = df[date].dt.dayofweek\n df['dayOfMonth'] = df[date].dt.day #???\n df['year'] = df[date].dt.year\n df['month'] = df[date].dt.month\n return df", "def get_daily_historic_data(self, ticker, start_date, end_date):\n av_url = self._construct_alpha_vantage_symbol_call(ticker)\n\n try:\n av_data_js = requests.get(av_url)\n data = 
json.loads(av_data_js.text)['Time Series (Daily)']\n except Exception as e:\n print(\n \"Could not download AlphaVantage data for %s ticker \"\n \"(%s)...stopping.\" % (ticker, e)\n )\n return pd.DataFrame(columns=COLUMNS).set_index('Date')\n else:\n prices = []\n for date_str in sorted(data.keys()):\n date = dt.strptime(date_str, '%Y-%m-%d')\n if date < start_date or date > end_date:\n continue\n\n bar = data[date_str]\n prices.append(\n (\n date, \n float(bar['1. open']),\n float(bar['2. high']),\n float(bar['3. low']),\n float(bar['4. close']),\n int(bar['6. volume']),\n float(bar['5. adjusted close'])\n )\n )\n price_df = pd.DataFrame(prices, columns=COLUMNS).set_index('Date').sort_index()\n self._correct_back_adjusted_prices(price_df)\n return price_df", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def create_df(link=config.api_link, key=config.api_key, master_file = 'pvd_crime_master.csv'):\n #only want reports we don't already have, so what is the most recent 
date in the master\n master = pd.read_csv(master_file, nrows=1)\n most_recent = pd.to_datetime(master['reported_date'])[0]\n most_recent_format = most_recent.strftime('%Y-%m-%dT%H:%M:%S.000')\n\n headers = {'Authentication': key} #api_key\n \n query = \"SELECT * WHERE reported_date > '\"+most_recent_format+\"' ORDER BY reported_date LIMIT 13000\"\n\n params = {'$query': query}\n\n response = requests.get(link, headers=headers, params=params) #json data\n response_json = response.json() #json data as list of dictionaries\n \n #create and return pandas DataFrame of json response\n\n return pd.DataFrame(response_json)", "def clean_ts(self, df_in, date_col, ts_col):\n df_dates=[parser.parse(t) for t in df_in[date_col]]\n df_in[date_col]=df_dates\n df_in['timestamp'] = [(t - datetime(1970, 1, 1)) / \\\n timedelta(seconds=1) for t in df_dates]\n df_in[ts_col]=pd.to_numeric(df_in[ts_col], errors='coerce')\n df_in=df_in[df_in['timestamp']>=self.begin_ts]\n df_in=df_in[df_in['timestamp']<=self.end_ts]\n df_in=df_in[['timestamp', ts_col]].dropna().\\\n sort_values(by='timestamp').\\\n copy()\n df_in['date']=[datetime.utcfromtimestamp(t)\\\n for t in df_in['timestamp']]\n return df_in", "def prepare_for_influxdb(df):\n df = df.drop(columns=\"landkreis\", errors=\"ignore\") # prevent name collision in get_ags()\n df = get_ags(df)\n df[\"time\"] = df.apply(lambda x: 1000000000*int(datetime.timestamp((pd.to_datetime(x[\"timestamp\"])))), 1)\n df[\"measurement\"] = \"hystreet\"\n df[\"origin\"] = \"https://hystreet.com\"\n df = df.rename(columns={\n 'station_id': '_id',\n 'pedestrians_count': 'pedestrian_count',\n 'state': 'bundesland'\n })\n df['ags'] = pd.to_numeric(df['ags'])\n # import pdb; pdb.set_trace()\n return df", "def process_data(filename, skiprow=0):\n df = pd.read_csv(filename, encoding='big5', header=None, skiprows=skiprow)\n # drop 測站\n df.drop(1, axis=1, inplace=True)\n print('Data Loaded, preview:')\n print(df.head())\n\n data = {}\n # group data by date\n for name, ddf in df.groupby(0):\n date = [s.zfill(2) for s in name.split('/')]\n month = date[1]\n\n # drop the date\n ddf.drop(0, axis=1, inplace=True)\n\n # set index as the measure\n ddf.set_index(2, drop=True, inplace=True)\n\n # set column as month-day-hour\n ddf.columns = ['-'.join(date[1:]+[str(i).zfill(2)]) for i in range(24)]\n\n # concatenate\n if month in data:\n data[month] = pd.concat([data[month], ddf], axis=1)\n else:\n data[month] = ddf\n\n # sort the columns by datetime\n for key in data.keys():\n data[key] = data[key][data[key].columns.sort_values()]\n\n print('\\nShow data index:')\n print(data['01'].columns)\n\n return data", "def generate_dataframe(forecast, observed):\n dataframe = pd.DataFrame(columns=COLUMNS, dtype=str)\n\n # Write cumulative forecasts.\n forecast_date_str = FORECAST_DATE.strftime(\"%Y-%m-%d\")\n for cum_week in sorted(forecast.keys()):\n target_end_date = FIRST_WEEK + ((cum_week - 1) * datetime.timedelta(7)) \n target_end_date_str = target_end_date.strftime(\"%Y-%m-%d\")\n # Terminate the loop after 8 weeks of forecasts.\n if cum_week >= 8:\n break\n \n # Skip forecasts before the forecast date.\n if target_end_date <= FORECAST_DATE:\n continue\n\n # Write a row for \"weeks ahead\" if forecast end day is a Saturday.\n if target_end_date >= FIRST_WEEK and target_end_date.weekday() == 5:\n target = str(cum_week) + \" wk ahead cum death\"\n for state_id in forecast[cum_week].keys():\n for quantile in forecast[cum_week][state_id].keys():\n val = observed[(FORECAST_DATE - 
datetime.timedelta(1)).strftime(\"%Y-%m-%d\")][state_id]\n for i in range(1, cum_week + 1):\n val += forecast[i][state_id][quantile]\n if quantile == \"point\":\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"point\",\n quantile=\"NA\",\n value=val\n ), ignore_index=True)\n else:\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"quantile\",\n quantile=quantile,\n value=val\n ), ignore_index=True)\n \n # Write incident forecasts.\n forecast_date_str = FORECAST_DATE.strftime(\"%Y-%m-%d\")\n for cum_week in sorted(forecast.keys()):\n target_end_date = FIRST_WEEK + ((cum_week - 1) * datetime.timedelta(7)) \n target_end_date_str = target_end_date.strftime(\"%Y-%m-%d\")\n # Terminate the loop after 8 weeks of forecasts.\n if cum_week >= 8:\n break\n \n # Skip forecasts before the forecast date.\n if target_end_date <= FORECAST_DATE:\n continue\n\n if target_end_date >= FIRST_WEEK and target_end_date.weekday() == 5:\n target = str(cum_week) + \" wk ahead inc death\"\n for state_id in forecast[cum_week].keys():\n for quantile in forecast[cum_week][state_id].keys():\n if quantile == \"point\":\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"point\",\n quantile=\"NA\",\n value=forecast[cum_week][state_id][quantile]\n ), ignore_index=True)\n else:\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=str(state_id),\n type=\"quantile\",\n quantile=quantile,\n value=forecast[cum_week][state_id][quantile]\n ), ignore_index=True)\n \n return dataframe", "def convert_to_json(dataframe):\n dataframe = dataframe.set_index('YYYYMMDD').to_json('schiphol_windstoten.json', orient = 'index')", "def clean(dataframe):\n # replace 'unknown' in Pop. 
density with np.nan\n dataframe = dataframe.replace('unknown', np.nan)\n\n # remove spaces from column names and content\n dataframe.columns = dataframe.columns.str.strip()\n\n # change YYYYMMDD to days of the year\n date_debug = []\n for i in range(1, 366):\n date_debug.append(i)\n\n dataframe2 = {'YYYYMMDD': date_debug}\n dataframe['YYYYMMDD'] = dataframe2['YYYYMMDD']\n\n return dataframe", "def load_data(city, month, day):\n# sub program to display raw data\n\n filename = (\"{}.csv\".format(city.replace(\" \",\"_\")))\n print(filename)\n df = pd.read_csv(filename)\n\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n months = ['january', 'febraury', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n\n question = input(\"Type \\\"yes\\\" if you would like to see raw data or type \\\"no\\\" to continue\").lower()\n\n x = 0\n y = 5\n while question not in [\"no\",\"yes\"]:\n\n question = input(\"Please check for error in input\")\n continue\n while question not in [\"no\"]:\n out_put = df.iloc[x:y,:]\n print(out_put)\n question = input(\"Type \\\"yes\\\" if you would like to see more or \\\"no\\\" to continue\")\n x = y\n y += 5\n continue\n return df", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def _to_dataframe(self, dataset_name):\n values = self[dataset_name][:]\n columns = self.get_columns(dataset_name)\n timestamps = self.get_timestamps(dataset_name)[...]\n if len(columns) < values.shape[1]:\n columns.resize(values.shape[1])\n\n # transform missing data into NaNs\n mask = missing_values(values) != 0\n try:\n values[mask] = numpy.nan\n except ValueError: # ValueError: cannot convert float NaN to integer\n # don't bother converting non-float arrays' -0.0 into NaNs\n pass\n\n dataframe = pandas.DataFrame(data=values,\n index=[datetime.datetime.fromtimestamp(t) for t in timestamps],\n columns=columns)\n return dataframe", "def transform(self, df: DataFrame) -> DataFrame:\n return df", "def process(df, *args): #input filename\n# try:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %Z\")\n# pubdate.replace(tzinfo=pytz.timezone(\"GMT\"))\n# # pubdate = pubdate.astimezone(pytz.timezone('EST'))\n# # pubdate.replace(tzinfo=None)\n# except ValueError:\n# pubdate = datetime.strptime(pubdate, \"%a, %d %b %Y %H:%M:%S %z\")\n\n# df = pd.read_csv('20131126.csv')\n ret = []\n for index,row in df.iterrows():\n country_Code = row['Actor1CountryCode']\n sqldate = row['SQLDATE']\n month_year = row['MonthYear']\n tone = row['AvgTone']\n url = row['SOURCEURL']\n news = News(country_Code,sqldate,month_year,tone,url)\n ret.append(news)\n print('\\nThere are %d items in News.'% len(ret))\n return ret", "def 
create_dates_dataframe(start_date,end_date):\n DateList = [start_date]\n while max(DateList) < end_date:\n DateKey = max(DateList) + timedelta(days=1)\n DateList.append(DateKey)\n DateList.sort() \n dfDates = pd.DataFrame(pd.to_datetime(DateList), columns = ['DateKey'])\n return dfDates", "def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)", "def df():\n fs.df()", "def recognize_dates(dframe):\n for i, dtype in enumerate(dframe.dtypes):\n if dtype.type == np.object_:\n column = dframe.columns[i]\n new_column = _convert_column_to_date(dframe, column)\n\n if not new_column is None:\n dframe[column] = new_column\n\n return dframe", "def __get_updated_data(self, table_name='derivatives_economicindicatorstandard'):\n # get the latest data and add 1 day to date column\n df = self.__get_latest_data(table_name)\n df['date'] = pd.to_datetime(df['date']) + timedelta(days=1)\n df['date'] = df['date'].apply(lambda x: x.strftime('%Y-%m-%d'))\n\n # get updated data\n result = []\n df.apply(lambda row: result.append(self.__call_api_data(row, table_name)), axis=1)\n df = pd.concat(result, sort=True)\n df['Date'] = df['Date'].apply(lambda x: x.strftime('%Y-%m-%d'))\n return df", "def load_data(city, month, day):\n# load datafile into a dataframe\n df=pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month)+1\n \n # filter by month to create the new dataframe\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n\n if city == 'all':\n df = pd.read_csv(CITY_DATA['chicago'])\n df['city'] = 'chicago'\n ny = pd.read_csv(CITY_DATA['new york city'])\n ny['city'] = 'new york city'\n df = df.append(ny,sort = True)\n wa = pd.read_csv(CITY_DATA['washington'])\n wa['city'] = 'washington'\n df = df.append(wa,sort = True)\n else:\n df = pd.read_csv(CITY_DATA[city])\n df['city'] = CITY_DATA[city]\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) #converts Start Time to datetime\n df['End Time'] = pd.to_datetime(df['End Time']) #converts End Time to datetime\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n #print(df) #DataFrame\n\n #filter by month\n if month != 'all':\n month_name = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month_num = month_name.index(month) + 1\n df = df[df['month'] == month_num]\n #filter by day\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n recs = df['Start Time'].count()\n\n return df, recs", "def get_obj_df():\n obj_df = pd.read_csv('data/object.csv')\n obj_df = obj_df.drop_duplicates()[['course_id', 'module_id', 'category', 'start']] \n obj_df['start'] = pd.to_datetime(obj_df[obj_df['start'] != 'null']['start'])\n 
return obj_df", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def SelectDataAndPlot(df,cols=None,dateField='Date',days=7,output_format='png'):\n if cols is not None:\n dt_now = datetime.datetime.now()\n beginning = dt_now - datetime.timedelta(days=days)\n data = df[cols][df[dateField]>beginning]\n utils.makeDateSeriesPlot(data,output_format=output_format,file_name_root=str(days))\n else:\n print('cols cannot be None')", "def load_data(city, month, day):\n while month != \"\":\n # load data file into a dataframe\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n # df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n try: df['day_of_week'] = df['Start Time'].dt.weekday_name\n except: df['day_of_week'] = df['Start Time'].dt.day_name()\n else: df['day_of_week'] = df['Start Time'].dt.weekday\n \n \n \n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = int(months.index(month)) + 1\n \n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "def get_data(symbols, dates, base_dir=\"../data/\"):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n temp_df = pd.read_csv(symbol_to_path(symbol, base_dir), index_col='Date',\n parse_dates=True, usecols=['Date', 'Close'],\n na_values=['nan'])\n temp_df = temp_df.rename(columns={'Close': symbol})\n df = df.join(temp_df, how='inner')\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # to_datetime command is used to convert(change) date into date format\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n #used to find index of month.\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n \n #filter data by day.\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n #print 5 rows.\n print(df.head())\n return df", "def get_events(filename):\n\tdf = pd.read_csv(filename)\n\t#get date from first entry (row) of DateUTC column\n\tdf['date'] = df['DateUTC<br />'][0].split(' ')[0]\n\t\n\t\n\t#drop the following columns\n\tdropLabels = ['FullMetar', 'DateUTC<br />', \\\n\t'Wind Direction','Gust SpeedMPH', \\\n\t'WindDirDegrees', 'Sea Level PressureIn', 'Dew PointF', \\\n\t'TemperatureF', 'Humidity','VisibilityMPH', \\\n 'Wind SpeedMPH', 'PrecipitationIn']\n\n\tdf.drop(labels=dropLabels,axis=1,inplace=True)\n\t\n\t#add hour column\n\ttimeLabel = df.columns.values[0] \n\tdf['Hour'] = pd.to_datetime(df[timeLabel]).dt.hour\n\t#drop timelabel column since we don't use anything beyond 
hour\n\tdf.drop(labels=timeLabel,axis=1,inplace=True)\n\n\treturn df", "def clean_df(dataframe: pd.DataFrame) -> pd.DataFrame:\n dataframe[\"Close Date\"] = pd.to_datetime(dataframe['Close Date']).dt.strftime('%Y-%m-%d')\n dataframe[\"Min_salary\"] = dataframe[\"Min_salary\"].astype(int)\n dataframe[\"Max_salary\"] = dataframe[\"Max_salary\"].astype(int)\n dataframe['HiringPath'] = dataframe['HiringPath'].astype(str)\n return dataframe", "def setup_df(ticker):\n stock = yf.Ticker(ticker)\n df = stock.history(period=\"max\")\n del df['Dividends']\n del df['Stock Splits']\n return df", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n if os.path.isfile(symbol_to_path(symbol)): \n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date', \n parse_dates = True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns = {'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': #drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n# else:\n# download_symbol(symbol) \n return df", "def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def get_date_in_days(raw_data, target_columns=['Submitby Date Time', 'Posting Date Date']):\r\n output = raw_data.copy()\r\n\r\n for column in target_columns:\r\n for i in range(len(raw_data)):\r\n date = datetime.date(output.loc[i, column+' Year'], output.loc[i, column+' Month'], output.loc[i, column+' Day'])\r\n output.loc[i, column+' Days from 2016'] = (date-datetime.date(2016, 1, 1)).days\r\n\r\n return output", "def get_data(symbols, dates):\r\n df = pd.DataFrame(index=dates)\r\n if 'SPY' not in symbols: # add SPY for reference, if absent\r\n symbols.insert(0, 'SPY')\r\n\r\n for symbol in symbols:\r\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col=\"Date\", parse_dates=True,\r\n usecols=['Date', 'Adj Close'], na_values=['nan'])\r\n\r\n # rename to prevent clash\r\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\r\n df = df.join(df_temp)\r\n if symbol == 'SPY': #drop dates SPY did not trade\r\n df = df.dropna(subset=[\"SPY\"])\r\n\r\n return df", "def data_from_dataframe(self, dataframe):\n self.dataframe = dataframe.drop_duplicates()\n #Convert numerical values into float type\n self.dataframe.apply(pandas.to_numeric, errors='ignore')\n #Convert timestamps into regular dates\n time_range = [datetime.datetime.fromtimestamp(time) for time in list(self.dataframe['time'])]\n beg = time_range[0]\n end = time_range[len(time_range)-1]\n #Attribute begining and ending dates\n self.beg = beg\n self.end = end", "def load_data(city, month, day):\n #load the data of the 
specified city in a dataframe:\n df = pd.read_csv(CITY_DATA[city])\n\n #convert the type of data in 'Start Time' column to datetime:\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #create new columns required to calculate time_stats:\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n\n #unless user input is all, filter by month:\n if month != 'all':\n month = months.index(month) + 1 #get the index of the month\n df = df[df['month'] == month]\n\n #uless user input is all, filter by weekday:\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n\n return df.set_index(pd.Series([i for i in range(df.shape[0])])) #reset the indices of the filterd df", "def data_input(path, complete=False, nrows=10000):\n\n if complete:\n df = pd.read_csv(path)\n\n else:\n df = pd.read_csv(path, nrows=nrows)\n df[\"date_time\"] = pd.to_datetime(\n df[\"date_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n\n #Maybe we could get rid of the exact timestamp if not useful\n #-> .apply(lambda x: x.date())\n return df", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def YahooFinancials_Data(Ticker=[],Start='',End ='',Frequency ='daily'):\n\n\n \n import pandas as pd\n from yahoofinancials import YahooFinancials\n import datetime as dt \n \n Ticker = Ticker or input(\"Enter Tcikers separated by',': \").split(',')\n Start = Start or input(\"Enter Start Date separated by '-': \") or (dt.date.today()-\n dt.timedelta(1825)).strftime(\"%Y-%m-%d\")\n End = End or input(\"Enter End Date separated by '-': \") or (dt.date.today()).strftime(\"%Y-%m-%d\")\n Frequency = Frequency or input(\"Enter Frequency like 'daily','weekly': \") or 'daily'\n \n data = pd.DataFrame()\n for i in range(len(Ticker)):\n try:\n yahoo_financials = YahooFinancials(Ticker[i])\n Json_obj = yahoo_financials.get_historical_price_data(Start, End, Frequency)\n Ohlv = 
Json_obj[Ticker[i]]['prices']\n temp = pd.DataFrame(Ohlv)[[\"formatted_date\",\"adjclose\"]]\n temp.set_index(\"formatted_date\", inplace = True)\n temp = temp[~temp.index.duplicated(keep = 'first')]\n data[Ticker[i]] = temp['adjclose']\n \n except:\n print(f\"Unable to get the Data for: {Ticker[i]}\")\n continue\n \n return data", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n df_temp = pd.read_csv(symbol_to_path(symbol),\n index_col=\"Date\",\n parse_dates=True,\n usecols=['Date', 'Adj Close'],\n na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp)\n\n df = df.dropna()\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city]) # similar to practiceQ3, load data file into a dataframe\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) # similar to practiceQ3, convert the \"Start Time\" column to to_datetime YY\n\n # similar to practiceQ3, extract month and day of week from \"Start Time\" to create new columns YY\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month, if applicable\n if month != 'all':\n # use the index of months list to get the corresponding int YY\n\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create new dataframe\n df = df[df['month'] == month]\n\n #filter by day of week, if applicable\n if day != 'all':\n #filter by day of week to create the new dataframe YY\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def transform(self, y=None):\n\n day_features = [\n \"01/06\",\n \"01/14\",\n \"02/02\",\n \"02/07\",\n \"03/01\",\n \"03/10\",\n \"03/20\",\n \"03/26\",\n \"04/01\",\n \"04/08\",\n \"04/17\",\n \"04/18\",\n \"05/06\",\n \"05/09\",\n \"05/10\",\n \"05/21\",\n \"05/31\",\n \"06/04\",\n \"06/05\",\n \"06/13\",\n \"06/28\",\n \"06/30\",\n \"07/09\",\n \"07/16\",\n \"07/20\",\n \"07/23\",\n \"07/29\",\n \"08/01\",\n \"08/20\",\n \"08/24\",\n \"08/28\",\n \"09/08\",\n \"09/11\",\n \"09/14\",\n \"09/30\",\n \"10/02\",\n \"10/17\",\n \"10/26\",\n \"11/01\",\n \"11/15\",\n \"11/16\",\n \"11/23\",\n \"11/27\",\n \"12/04\",\n \"12/12\",\n \"12/19\",\n \"12/21\",\n \"12/23\",\n \"12/29\",\n ]\n spec_days = pd.DataFrame({\"month_day\": day_features, \"spec_day\": day_features})\n df = self.X.join(spec_days.set_index(\"month_day\"), on=\"month_day\")\n\n date_dummies = (\n pd.get_dummies(\n df[[\"date\", \"day\", \"month_weekday\", \"spec_day\"]].set_index(\"date\"),\n columns=[\"day\", \"month_weekday\", \"spec_day\"],\n )\n .reset_index()\n .drop_duplicates()\n )\n return df.join(date_dummies.set_index(\"date\"), on=\"date\").fillna(0)", "def process_stocks(s3_working_bucket: str, date: tuple) -> DataFrame:\n\n logging.debug(\"Start reading stocks csv.\")\n df_stocks = stdm.read_csv(spark, s3_working_bucket, date, \"stocks\")\n\n logging.debug(\"Calling gmt_unix_to_datetime function.\")\n df_stocks = stp.gmt_unix_to_datetime(df_stocks, \"timestamp\")\n\n logging.debug(\"Calling order_by_col function.\")\n df_stocks = stp.order_by_col(df_stocks, \"datetime\")\n\n return df_stocks", "def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = 
['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n #print(df.head())\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time']) \n #print(df['Start Time'].head())\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n #print(df['month'].head())\n #print(df['day_of_week'].head())\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print((months[month]))\n #print(month)\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n #print(df['month'].head())\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n #print(day.title())\n #print(df.head())\n\n return df", "def read_csv():\n csv_file = \"dow.csv\"\n\n # read the data from the csv file, parsing the Dates to make the x-axis, setting index_col to zero to remove it\n data_frame = pd.read_csv(csv_file, parse_dates=True, index_col=0)\n return data_frame", "def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:", "def get_daily_data_from_stooq(ticker_symbol, start_date, end_date):\n # check whether the start_date and end_date are strings\n if isinstance(start_date, str) and isinstance(end_date, str):\n pass\n else:\n raise ValueError(\"Dates passed to the function are not strings!!!\")\n # validate formats of dates passed to the function\n validate_date_format_yyy_mm_dd(start_date)\n print(\"Validation of start_date format result: positive...\")\n validate_date_format_yyy_mm_dd(end_date)\n print(\"Validation of end_date format result: positive...\")\n d_1 = start_date.replace(\"-\", \"\")\n d_2 = end_date.replace(\"-\", \"\")\n temp_url = \"https://stooq.com/q/d/l/?s=\" + ticker_symbol + \"&d1=\" \\\n + d_1 + \"&d2=\" + d_2 + \"&i=d\"\n print(\"Getting data from URL: \", temp_url)\n # try-except block to catch the cases when the ticker symbol is nonexistent\n try:\n data_in = pd.read_csv(temp_url, usecols=['Date', 'Close'],\n parse_dates=[0])\n except ValueError:\n print(\"ValueError occurred! Probably a nonexistent ticker has been\"\n \" passed to the function\")\n except Exception:\n print(\"General error has occurred! 
Please check function arguments...\")\n else:\n # if data is obtained, rename \"Close\" ===> ticker name\n data_in.rename(columns={\"Close\": ticker_symbol}, inplace=True)\n return data_in", "def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def load_data(city, month, day):\n file_data = pd.read_csv(CITY_DATA[city])\n df = pd.DataFrame(data=file_data)\n\n # weekday and month columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n\n df['weekend_day'] = df['Start Time'].dt.weekday\n\n if month != 'all':\n df = df[df['month']==Months.index(month) + 1]\n\n\n if day != 'all':\n df = df[df['weekend_day'] == day.title()]\n\n\n return df", "def daily_new_cases(start_date, end_date, lga_name, columnname_lga = COLUMN_LGA, columnname_cases = COLUMN_CASES, excel_file_name = EXCEL_FILE_NAME):\n\n start_date = dt.strptime(start_date, '%m-%d') \n end_date = dt.strptime(end_date, '%m-%d')\n \n # data to be drawn in the line graph\n x_date = []\n y_cases = []\n\n with open(EXCEL_FILE_NAME, 'rb') as f:\n df = pd.read_excel(f, index=False)\n\n # keep updating current_date in for-loop to find the right column in excel\n # change data type to str and parse index 5-9 to get the format of 'mm-dd'\n current_date = str(start_date)[5:10]\n no_of_days = end_date - start_date\n for i in range (no_of_days.days + 1):\n column_name = columnname_cases + current_date\n\n x_date.append(current_date)\n y_cases.append(int(df[column_name].loc[df[columnname_lga] == lga_name].to_list()[0]))\n\n current_date = dt.strptime(current_date, '%m-%d') + td(days=1) \n current_date = str(current_date)[5:10]\n\n data = []\n data.append(x_date)\n data.append(y_cases)\n return data", "def convert_to_daily(data_list):\n for _in in range(1, len(data_list)):\n data_list[-_in] = data_list[-_in] - data_list[-_in - 1]", "def load_data(city, month, day):\n # here i load the datak\n df=pd.read_csv(CITY_DATA[city])\n \n df['Start Time']=pd.to_datetime(df['Start Time'])\n \n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n df['hour']=df['Start Time'].dt.hour\n \n #filter by month\n if month!='all':\n month =months.index(month)+1\n df=df[df['month']==month]\n \n #filter by day of week\n if day!='all':\n df=df[df['day_of_week']==day.title()]\n \n return df" ]
[ "0.65267056", "0.62918127", "0.62888753", "0.62433565", "0.6169422", "0.61276734", "0.6091886", "0.609138", "0.60746336", "0.6005586", "0.6000989", "0.59937394", "0.59874874", "0.59397405", "0.59353983", "0.5922037", "0.5900342", "0.589187", "0.5853567", "0.58485276", "0.58252066", "0.5824473", "0.58242244", "0.58203954", "0.5817477", "0.5816937", "0.579279", "0.5784105", "0.5775284", "0.5768148", "0.5749051", "0.5745817", "0.5742192", "0.57353526", "0.57297313", "0.5727689", "0.57238483", "0.56943876", "0.56941944", "0.5692726", "0.5688125", "0.5673238", "0.5661743", "0.5651674", "0.56506824", "0.5639899", "0.56148803", "0.5609152", "0.5603825", "0.5594777", "0.55933803", "0.5587203", "0.5581158", "0.5577691", "0.55709296", "0.5568189", "0.5566536", "0.55620235", "0.55585724", "0.555675", "0.5556465", "0.5550131", "0.55499893", "0.5542853", "0.5532803", "0.55296636", "0.552848", "0.55249244", "0.55198836", "0.55174965", "0.55117625", "0.5510185", "0.550605", "0.55031884", "0.5502539", "0.54800034", "0.5479648", "0.547353", "0.5468944", "0.54675645", "0.5464622", "0.5459274", "0.54587805", "0.545634", "0.54499215", "0.54378754", "0.54319894", "0.5428581", "0.54233825", "0.5416944", "0.5415726", "0.5414517", "0.54129845", "0.54124135", "0.5411232", "0.5410622", "0.5405415", "0.539884", "0.5398141", "0.53949696", "0.5392542" ]
0.0
-1
Returns the ROS time in seconds
def get_time(cls):
    now = rospy.Time.now()
    return now.secs + now.nsecs*(10**-9)  # time in seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gettime():\n return libruss.russ_gettime()", "def get_time(self):\n return self.get_timed() / 10.0", "def _current_time_seconds(self):\n return int(round(time.time()))", "def currentTimeSecs():\n return time.time()", "def get_time(self) -> float:\n self.rocket.update()\n return self.rocket.time", "def elapsed_time_in_seconds(self):\n return self._elapsed_time_in_seconds", "def current_time_seconds(self):\n return int(round(time.time()))", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def time(self) -> float:\n return self.sim_scene.data.time", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def now():\n\n return rospy.Time.now().to_nsec()", "def sec(self):\n # my clock uses seconds internally\n return self.read()", "def recv_time(self) -> float:\n return ntp_to_system_time(self.recv_timestamp)", "def time(self):\n raise \"use method time of class ReactorNet\"\n #return _cantera.reactor_time(self.__reactor_id)", "def time(self):\n return _cantera.reactornet_time(self.__reactornet_id)", "def timeTime(self):\n return self._micros / 1000000.0", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time", "def service_time(self):\r\n return (self.completion_time - self.node_monitor_launch_time)", "def time(self) -> int:\n return int(round(time.time() * 1000))", "def seconds(self):\n end = self.end or timezone.now()\n result = end - self.start\n return result.seconds", "def time(self) -> int:\n return self.__droneTime", "def get_time(self):\n return self._current_time_sec", "def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start", "def get_time(self):\n return self._total_time", "def seconds(self):\n return int(self)", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def t_sec(self):\n return self.t/self.parameters['time_conversion']", "def get_elapsed_seconds():\n\tutcnow = datetime.utcnow()\n\tmidnight_utc = datetime.combine(utcnow.date(), time(0))\n\tdelta = utcnow - midnight_utc\n\treturn delta.total_seconds()", "def secondsPassed(self)->int:\n return self._lic.params['sessionTimeUsed'].value", "def time_since_last_state_change(self):\n current_time = rospy.get_rostime()\n difference = current_time - self._timestamps['last_state_change']\n return difference.to_sec()", "def _time(self):\n return self.r.eval(self.LUA_TIME, 1, 1)", "def time(self):\n return self._clock() - self._starttime", "def get_real_secs(self):\n return _uhd_swig.time_spec_t_get_real_secs(self)", "def get_time_ms():\n return int(round(time.time() * 1000))", "def total_seconds(self):\n return 0", "def seconds_from_last_update(self):\n return (datetime.utcnow() - self.last_update_datetime).total_seconds()", "def get_elapsed_time(self):\n if hasattr(self, 'starttime'):\n return monotonic() - self.starttime\n else:\n return 0", "def get_time(self) -> float:\n raise NotImplementedError()", "def time(self) -> float:\n return self._time", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def get_seconds(self):\n return self.seconds_remaining", "def seconds(self):\n return self._seconds", "def seconds(self):\n return self._seconds", "def 
runtime(self):\n return (self.time - self.start).total_seconds()", "def getTime(self) -> float:\n return self.t", "def time(self) -> int:\n return self.raw[\"time\"]", "def elapsed(self):\n return str(datetime.datetime.now() - self.start).split('.')[0]", "def tunnel2_rekey_margin_time_seconds(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel2_rekey_margin_time_seconds\")", "def estimate_reaction_time(self, R):\n # Transferred to json\n # estimated reaction time in s\n e_time = 360.\n return e_time", "def get_system_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetSystemTime', self.handle)", "def getTime():\n\n return float(time.perf_counter()*1000)", "def elapsed_time(self) -> float:\n current_time = datetime.utcnow()\n start = self.start_time or current_time\n end = self.end_time or current_time\n return (end - start).total_seconds()", "def get_total_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetTotalTime', self.handle)", "def get_time(self) -> float:\n return self.player.time", "def getTimeLeftSec(self):\n if self.sess is None: return 0\n since = self.sess.data.get('validSince')\n if not since: return 0\n\n sofar = time.time() - since\n if sofar < 0: return 0\n out = self.sess.data.get('validLifetime', 0) - sofar\n if out < 0: out = 0\n return out", "def elapsed_time(self):\n return self.__elapsed_time", "def crtime(self):\n return safeInt(self.tag(\"crtime\"))", "def time(self):\n\t\treturn self._time", "def total_seconds(self):\n return (\n (self.days * 86400 + self.seconds) * 10**6 + self.microseconds\n ) / 10**6", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def sweep_time(self):\n time = self._pna.query('sense{}:sweep:time?'.format(self._channel))\n if time:\n return float(time)\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} sweep point number'''.format(self._channel)))", "def elapsed_time(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"elapsed_time\")", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def tunnel1_rekey_margin_time_seconds(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel1_rekey_margin_time_seconds\")", "def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()", "def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)", "def get_timed(self):\n ret = self.send(\"?T\", recv=True)\n ret = int(ret, 10)\n # FIXME: range?\n assert 1 <= ret <= 9999\n return ret", "def time(self):\n # type: () -> int\n return self._time", "def realtime():\n return timemodule.time()", "def get_time_in_round() -> int:\n # FIXME - returning negative value for projectiles\n return store.round_time", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def elapsed():\n global start_time\n return time.time() - start_time", "def time(self) -> int:\n pass", "def tunnel2_rekey_margin_time_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, 
\"tunnel2_rekey_margin_time_seconds\")", "def tunnel2_rekey_margin_time_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tunnel2_rekey_margin_time_seconds\")", "def time_of_possession(self):\n return self._time_of_possession", "def time_difference_in_seconds(readings: List[Reading]) -> int:\n initial, final = latest_two_readings(readings)\n seconds = (final.timestamp - initial.timestamp).total_seconds()\n if seconds == 0:\n raise UsageEstimatorError(\"Two readings taken at the exact same second\")\n\n return int(seconds)", "def time(self):\n return sum(self._interval) * .5", "def time(self):\n return pygame.time.get_ticks() - self.start_time", "def elapsed(self):\n done, data1 = self._request('GS')\n if done:\n if data1[0] != '3':\n raise NotCharging\n done, data2 = self._request('GU')\n if done:\n return {\n 'seconds': int(data1[1]),\n 'Wh': float(data2[0])/3600\n }\n raise EvseError", "def readout_time(self):\n time = ct.c_float()\n self.lib.GetReadOutTime(ct.pointer(time))\n return time.value", "def orig_time(self) -> float:\n return ntp_to_system_time(self.orig_timestamp)", "def get_time(self):\n\t\treturn time.time()", "def get_time_elapsed(self):\n return self.__time_elapsed", "def get_user_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetUserTime', self.handle)", "def session_time(self):\n\n time_in_seconds = time.time() - self.__start_time\n return time.strftime('%H:%M:%S', time.gmtime(time_in_seconds))", "def actual_time():\n return _time.time()", "def run_time_sec(self):\n if self.job_updated_at is not None:\n return (self.job_updated_at - self.created_at).total_seconds()\n\n return None", "def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)", "def virtual_time(self):\n return (_time.time() - PROTOCOL_START_TIME) / ROUND_DURATION", "def time_return(self):\n return self.time", "def unit_sec(self):\n return self.time_base / 60.0", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])" ]
[ "0.75012565", "0.72696334", "0.704129", "0.704039", "0.70382655", "0.69805497", "0.6978949", "0.6969247", "0.69403255", "0.69107735", "0.6896041", "0.6878848", "0.685984", "0.67993206", "0.67879426", "0.6787236", "0.67727417", "0.6743631", "0.6704049", "0.6703564", "0.6699629", "0.6689523", "0.6672855", "0.66515297", "0.66457087", "0.6624357", "0.66218835", "0.66137534", "0.65992653", "0.6598738", "0.6573984", "0.65708095", "0.65680254", "0.65616435", "0.65500367", "0.6522175", "0.65202194", "0.65124166", "0.6509996", "0.64893454", "0.6489086", "0.64856315", "0.64790714", "0.64790714", "0.6465836", "0.64609694", "0.6456011", "0.6436245", "0.643576", "0.64295876", "0.6428209", "0.64278746", "0.64264154", "0.6416653", "0.6413103", "0.64080876", "0.640024", "0.6396106", "0.63889635", "0.6386998", "0.6373501", "0.6373501", "0.6373501", "0.6373501", "0.6373501", "0.6373501", "0.63633615", "0.6360018", "0.6353283", "0.63401365", "0.63349897", "0.6332464", "0.6330173", "0.63283175", "0.632673", "0.6325301", "0.6317536", "0.63158435", "0.631271", "0.63112533", "0.6305585", "0.6305585", "0.6298152", "0.62957084", "0.62951535", "0.62943953", "0.62849635", "0.6281249", "0.6279948", "0.62594867", "0.62576693", "0.62505054", "0.62458175", "0.62436014", "0.62426347", "0.6241817", "0.6240807", "0.6234155", "0.6226159", "0.62250155" ]
0.7948383
0
Creates the log directories for the runs and saves initial run info
def instantiate_logs(self):
    # Log file
    timestamp = datetime.now().strftime("%Y-%m-%dT%H%M%S")
    self.log_dir = os.path.join("experiment_logs", timestamp)
    # Create Log directory if it does not exist
    try:
        os.makedirs(self.log_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    self.info_file = os.path.join(self.log_dir, "run_info.txt")
    self.log_file = os.path.join(self.log_dir, "data.csv")
    with open(self.info_file, "w+") as f:
        f.write("Period = {}\nMaxVel = {}".format(self.period, self.max_vel))
    self.log_file_desc = open(self.log_file, "w+")
    self.log_file_desc.write("t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_dir(self):\n if not os.path.exists(self._save_dir):\n logger.info(\"save_dir {} does not exist, \"\n \"creating it\".format(self._save_dir))\n os.makedirs(self._save_dir)\n\n # Log the run parameters.\n logger.info(\"Writing logs to {}\".format(self._log_dir))\n\n if not os.path.exists(self._log_dir):\n logger.info(\"log path {} does not exist, \"\n \"creating it\".format(self._log_dir))\n os.makedirs(self._log_dir)", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()", "def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)", "def init_log():\n os.system('rm -rf /target/testdriver.log || true')\n os.system('touch /target/testdriver.log')\n os.system(f\"chown {uid_gid_output} /target/testdriver.log\")\n os.system('chmod 664 /target/testdriver.log')", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def init(directory,name):\n \n import os,time\n fname=os.path.join(directory,'Log_'+name+'_'+time.strftime('%Y-%b-%d')+'.txt')\n if not os.path.isfile(os.path.join(directory,fname)):\n try:\n fhandle = open(fname, 'a')\n fhandle.write('-----------------------------------------------------\\n')\n fhandle.write('Program initialized! on '+time.strftime('%Y-%b-%d')+'\\n')\n fhandle.write('-----------------------------------------------------\\n')\n print('======begin log!======')\n return(fhandle)\n except:\n print('Could not create Log file with name : '+\n ' Log_'+name+'_'+date.today().isoformat()+'.txt')\n else:\n fhandle=open(fname,'a')\n fhandle.write('-----------------------------------------------------\\n')\n fhandle.write('Program initialized! 
on '+time.strftime('%Y-%b-%d')+'\\n')\n fhandle.write('-----------------------------------------------------\\n')\n print('======begin log!======')\n return(fhandle)", "def before_run(self, runner) -> None:\n if self.out_dir is not None:\n # The final `self.out_dir` is the concatenation of `self.out_dir`\n # and the last level directory of `runner.work_dir`\n basename = osp.basename(runner.work_dir.rstrip(osp.sep))\n self.out_dir = self.file_backend.join_path(self.out_dir, basename)\n runner.logger.info(\n f'Text logs will be saved to {self.out_dir} after the '\n 'training process.')\n\n self.json_log_path = f'{runner.timestamp}.json'", "def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def pre_start(self):\n self.make_runpath_dirs()", "def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir", "def init():\n for team_id in TEAM_DICT.keys():\n create_game_logs_file(team_id)", "def initialize_output_files(self):\r\n if not self.C.restart:\r\n print(\"* Touching output files.\", flush=True)\r\n # begin writing `generation.csv` file\r\n csv_path_and_filename = self.C.job_dir + \"generation.csv\"\r\n util.properties_to_csv(\r\n prop_dict=self.ts_properties,\r\n csv_filename=csv_path_and_filename,\r\n epoch_key=\"Training set\",\r\n append=False,\r\n )\r\n\r\n # begin writing `convergence.csv` file\r\n util.write_model_status(append=False)\r\n\r\n # create `generation/` subdirectory to write generation output to\r\n os.makedirs(self.C.job_dir + \"generation/\", exist_ok=True)", "def __init__(self, run_config):\n print('Initializing logs...')\n log_root = run_config['log_root_path']\n self._save_iter = run_config['save_iter']\n self._best_epoch = False\n if run_config['resume_path']:\n # resume an old experiment\n self.log_dir = run_config['resume_path']\n if os.path.exists(os.path.join(log_root, self.log_dir)):\n self.log_path = os.path.join(log_root, self.log_dir)\n print(' Resuming experiment ' + self.log_dir)\n else:\n raise Exception('Experiment folder ' + self.log_dir + ' not found.')\n else:\n # start a new experiment\n if 'log_dir' not in run_config:\n self.log_dir = ''\n else:\n self.log_dir = run_config['log_dir']\n self.log_dir += strftime(\"%b_%d_%Y_%H_%M_%S\") + '/'\n self.log_path = os.path.join(log_root, self.log_dir)\n os.makedirs(self.log_path)\n os.system(\"rsync -au --include '*/' --include '*.py' --exclude '*' . 
\" + self.log_path + \"source\")\n os.makedirs(os.path.join(self.log_path, 'metrics'))\n os.makedirs(os.path.join(self.log_path, 'checkpoints'))\n self.epoch = 1\n print(' Starting experiment ' + self.log_dir)", "def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def setup(outpath):\n time = datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\n temp = os.path.join(outpath, \"data\", \"temp\")\n result = os.path.join(outpath, \"results\")\n logs = os.path.join(outpath, \"logs\")\n download = os.path.join(outpath, \"data\", \"download\")\n chromsizes = os.path.join(outpath,\n \"data\", \"chromsizes\")\n if not os.path.exists(download):\n os.makedirs(download)\n if not os.path.exists(temp):\n os.makedirs(temp)\n if not os.path.exists(result):\n os.makedirs(result)\n if not os.path.exists(logs):\n os.makedirs(logs)\n if not os.path.exists(chromsizes):\n os.makedirs(chromsizes)\n\n logname = time + \"_tfanalyzer.log\"\n logfile = os.path.join(logs, logname)\n logging.basicConfig(filename=logfile, level=logging.INFO)\n return logfile", "def setupRunDir(self):\n\n pass", "def initialize():\n if not os.path.isfile(WORK_LOG_FILENAME):\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writeheader()", "def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)", "def CreateLoggingDirectories(\n dataset_root: Path, model_name: str, analysis: str, run_id: str = None\n) -> Path:\n run_id = run_id or time.strftime(\"%y:%m:%dT%H:%M:%S\")\n log_dir = dataset_root / \"logs\" / model_name / analysis / run_id\n if log_dir.is_dir():\n raise OSError(\n f\"Logs directory already exists. 
Refusing to overwrite: {log_dir}\"\n )\n logging.info(\"Writing logs to %s\", log_dir)\n log_dir.mkdir(parents=True)\n (log_dir / \"epochs\").mkdir()\n (log_dir / \"checkpoints\").mkdir()\n (log_dir / \"graph_loader\").mkdir()\n return log_dir", "def _initialize_directory(self):\n self._generate_settings()\n if os.path.exists(self.target_path):\n sys.exit(\"WARNING: %s already exists, exiting\" % self.target_path)\n self._print_initialize_message()\n self._create_directories()\n self._create_general_config_file()\n self._create_default_pipeline_config_file()\n self._create_filelist()\n print", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def logs_directory(self):", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def setup_directories():\n run('mkdir -p %(path)s' % env)\n run('mkdir -p %(env_path)s' % env)\n run('mkdir -p %(log_path)s;' % env)\n sudo('chgrp -R www-data %(log_path)s; chmod -R g+w %(log_path)s;' % env)\n \n with settings(warn_only=True):\n run('ln -s %(log_path)s %(path)s/logs' % env)", "def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)", "def _create_run_dir(self):\n task_name = 'task_'+str(self.setting['task_id'])\n run_name = '_'.join(['run', \n str(self.setting['run_idx']), \n str(self.setting['_id'])])\n \n run_dir = op.join(Job_Requestor.run_center, task_name, run_name)\n back_dir = op.join(run_dir, '.backup')\n try: \n os.makedirs(run_dir)\n os.makedirs(back_dir)\n os.symlink(Job_Requestor.data_dir, op.join(run_dir, 'data')) \n\n except:\n print(\"Error: run dir %s create failed\"%(run_dir))\n # sys.exit(0)\n \n self.run_dir = run_dir\n self.setting.update({'run_dir':run_dir})\n self._db_update({'run_dir': run_dir})", "def __setup_output_directory(self):\n print('Setting up output directory')\n time_stamp = datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n self.output_path = os.path.join(self.output_base_path, '%s_%s' % (self.execution_name, time_stamp))\n print('- Creating output directory: %s' % self.output_path)\n os.makedirs(self.output_path)\n print('- Output directory created')", "def _create_paths(self):\r\n\r\n # Copying the file 'PCU_logs.robot' to the folder with test suites.\r\n if not os.path.exists('\\\\'.join([self.path, self.log_test])):\r\n shutil.copy(self.log_test, self.path)\r\n\r\n # Moving to test suites directory\r\n os.chdir(self.path)\r\n\r\n # Create a directory for the test suite\r\n if not os.path.exists(self.output_dir_path):\r\n os.makedirs(self.output_dir_path)", "def create_save_directories(self):\n # Set the name for the saved model and training summary directory\n self.model_dir = op.join('../logs', self.name, 'models')\n self.train_summary_dir = op.join('../logs', self.name, 'training_summary')\n\n if not op.exists(self.model_dir):\n if not op.exists(op.join('../logs', self.name)):\n if not op.exists('../logs'):\n os.mkdir('../logs')\n os.mkdir(op.join('../logs', self.name))\n os.mkdir(self.model_dir)\n\n if not op.exists(self.train_summary_dir):\n if not op.exists(op.join('../logs', self.name)):\n if not op.exists('../logs'):\n 
os.mkdir('../logs')\n os.mkdir(op.join('../logs', self.name))\n os.mkdir(self.train_summary_dir)\n return self", "def init( args ):\n global save_dir_ \n global args_ \n args_ = args\n now = datetime.datetime.now()\n timeStamp = now.strftime('%Y-%m-%d_%H-%M-%S')\n outdir = 'Bee%s' % timeStamp\n save_dir_ = os.path.join( save_dir_, outdir )\n if os.path.exists(save_dir_):\n save_dir_ = os.path.join(save_dir_, timeStamp)\n os.makedirs(save_dir_)\n else:\n os.makedirs(save_dir_) \n\n print( '[INFO] Init is done' )", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])", "def setup_training_run_folder():\n\n number = input(\"Please enter the training run number (eg. 1, 2, 3): \")\n\n while len(number) < 4:\n number = '0' + number # uniform length test number XXXX\n\n saves = list(glob.glob('saves/*'))\n\n for save in saves:\n if number in save: # avoid learning rate / test number clash\n print(\"WARNING:\")\n print(\"Training run number {} already exists.\".format(number))\n answer = input(\"Are you sure you want to delete it? (y/n): \")\n if answer.lower() == 'y':\n shutil.rmtree('saves/{}'.format(number))\n else:\n raise ValueError(\"Training run number already exists in save files.\")\n\n save_folder = 'saves/{}'.format(number)\n os.mkdir(save_folder)\n\n # copy the current config file over for posterity\n config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '../config.json')\n new_path = os.path.join(save_folder, '{}_config.json'.format(number))\n shutil.copyfile(config_path, new_path)\n\n # Set up loggers to write to file\n rootLogger = logging.getLogger() # logger used by other modules\n rootLogger.setLevel(20)\n fawLogger = logging.Logger('FAW_logger') # dedicated FAW logger\n fawLogger.setLevel(10)\n\n streamFormatter = logging.Formatter(\n \"%(message)s\")\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(streamFormatter)\n fawLogger.addHandler(streamHandler)\n \n logFormatter = logging.Formatter(\n \"%(asctime)s [%(levelname)-5.5s] %(message)s\")\n fileHandler = logging.FileHandler(\"{0}/{1}.log\".format(save_folder, number))\n fileHandler.setFormatter(logFormatter)\n fawLogger.addHandler(fileHandler)\n rootLogger.addHandler(fileHandler)\n\n return number, save_folder, fawLogger", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def create_logs(self):\n print(\"creating logs...\")\n with open(self.log_file,'w') as log:\n writer = csv.writer(log)\n writer.writerow(['population',\n 'avg_age',\n 'avg_surv',\n 'avg_repro',\n # 'avg_neighbors_1',\n # 'avg_neighbors_2',\n # 'avg_neighbors_3',\n # 'avg_neighbors_4',\n # 'avg_neighbors_5',\n # 'avg_neighbors_6',\n # 'avg_neighbors_7',\n # 'avg_neighbors_8',\n 'number_of_clusters',\n 'clusters_10e1',\n 'clusters_10e2',\n 'clusters_10e3',\n 'clusters_10e4',\n 'clusters_10e5'])\n print(\"Logs created @ {}\".format(self.log_file))", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def setUp(self):\n self.path = tempfile.mkdtemp()\n self.log = log.Log(self.path)", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n 
config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def new_custom_log_dir(self) -> str:", "def _setup_run(cfg: Dict) -> Dict:\n now = datetime.now()\n day = f\"{now.day}\".zfill(2)\n month = f\"{now.month}\".zfill(2)\n hour = f\"{now.hour}\".zfill(2)\n minute = f\"{now.minute}\".zfill(2)\n run_name = f'run_{day}{month}_{hour}{minute}_seed{cfg[\"seed\"]}'\n # cfg[\"run_dir\"] = Path(__file__).absolute().parent / \"runs\" / run_name\n cfg[\"run_dir\"] = cfg[\"run_dir\"] / run_name\n if not cfg[\"run_dir\"].is_dir():\n cfg[\"train_dir\"] = cfg[\"run_dir\"] / \"data\" / \"train\"\n cfg[\"train_dir\"].mkdir(parents=True)\n cfg[\"val_dir\"] = cfg[\"run_dir\"] / \"data\" / \"val\"\n cfg[\"val_dir\"].mkdir(parents=True)\n else:\n raise RuntimeError(f\"There is already a folder at {cfg['run_dir']}\")\n\n # dump a copy of cfg to run directory\n with (cfg[\"run_dir\"] / \"cfg.json\").open(\"w\") as fp:\n temp_cfg = {}\n for key, val in cfg.items():\n if isinstance(val, PosixPath):\n temp_cfg[key] = str(val)\n elif isinstance(val, Dict):\n for k in val:\n if isinstance(val[k], PosixPath):\n val[k] = str(val[k])\n elif isinstance(val, pd.Timestamp):\n temp_cfg[key] = val.strftime(format=\"%d%m%Y\")\n else:\n temp_cfg[key] = val\n json.dump(temp_cfg, fp, sort_keys=True, indent=4)\n\n return cfg", "def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def Create_log():\r\n \"\"\"And Maintain log file to the current date in MMM_DD_YY format\"\"\"\r\n \r\n name = multiprocessing.current_process().name\r\n config = config_create()\r\n Stream = config.get('Log', 'Log1')\r\n Tweet = config.get('Log', 'Log2')\r\n OverallLog = config.get('Log', 'Log3')\r\n \r\n uscore = '_'\r\n txtn = '.txt'\r\n StreamL = uscore +Stream+ txtn\r\n TweetL = uscore +Tweet+ txtn\r\n OverallLogL = OverallLog+txtn\r\n \r\n \r\n \r\n name = multiprocessing.current_process().name\r\n StreamFileName = time.strftime(\"%b_%d_%y\")+StreamL\r\n TweetFileName = time.strftime(\"%b_%d_%y\")+TweetL\r\n config.set('Latest_Log', 'currentstreamlog',StreamFileName)\r\n config.set('Latest_Log', 'currenttweetlog',TweetFileName)\r\n config.set('Latest_Log', 'overalllog',OverallLogL)\r\n \r\n with open('botconfig.ini', 'w') as x:\r\n config.write(x)\r\n if os.path.isfile(StreamFileName) is False:\r\n open(StreamFileName, 'w')\r\n \r\n if os.path.isfile(OverallLogL) is False:\r\n open(OverallLogL, 'w')\r\n \r\n if 
os.path.isfile(TweetFileName) is False:\r\n twfile = open(TweetFileName, 'w')\r\n ## Edit this or comment to change first line entered upon\r\n ## File creation\r\n twfile.write('0 ComicTweetBot')\r\n #time.sleep(1)\r\n #Create_log()\r", "def initialize_summary(self):\n if self.need_logs:\n self.summary_writer = tf.summary.create_file_writer(self.log_dir)\n if self.verbose > 0:\n full_log_path = os.path.abspath(self.log_dir)\n print('Initialize logs, use: \\ntensorboard --logdir={}'.format(full_log_path))", "def gen_paths():\n global log_dir, events_file, log_file, datastream_dir\n\n #The root log directory\n log_root_dir = os.path.join(root_dir, \"logs\")\n\n #Figure out what log file index we should use\n #The log file index is a 4-digit number corresponding to an unused log folder\n index = 0\n #If our base log_root_dir exists:\n if os.path.exists(log_root_dir):\n\n #Get existing folders, convert to string list, and sort\n folders = os.listdir(log_root_dir)\n ids = [int(f) for f in folders]\n ids.sort()\n\n #This algorithm determines the next sequential value for our log index, it scans through the existing numbers\n #until either it finds a missing number in sequence, or runs out of numbers to scan.\n\n #Set this to a high number to start with, as it will get set every loop iteration\n last_id = 10000\n for present_index in ids:\n #If we have a break in the number sequence, abort and use what we have\n if present_index > last_id + 1:\n break\n #If we have found a bigger number to use for index\n if present_index > index:\n index = present_index\n\n last_id = present_index\n\n #Convert from largest existing index to the index we should use!\n index += 1\n\n #Set the log_dir, which is the directory for storing all logs during this run session\n log_dir = os.path.join(log_root_dir, str(index).zfill(4))\n\n #Set the log_file, which is a dump of all console output\n log_file = os.path.join(log_dir, \"main.log\")\n\n #Set the events_file, which is where all events are recorded\n events_file = os.path.join(log_dir, \"events.rec\")\n\n #Set the datastream_dir, within which all datastreams are recorded\n datastream_dir = os.path.join(log_dir, \"datastreams\")", "def test_default_log_dir(self, cleanup_local_folder):\n test_trainer = pl.Trainer(checkpoint_callback=False, logger=False)\n\n log_dir = exp_manager(test_trainer, {\"create_tensorboard_logger\": False, \"create_checkpoint_callback\": False})\n assert (log_dir / \"..\").resolve() == Path(\"./nemo_experiments/default/\").resolve()\n assert Path(\"./nemo_experiments\").exists()\n assert Path(\"./nemo_experiments/default/\").exists()\n sub_dirs = [x for x in Path(\"./nemo_experiments/default/\").iterdir() if x.is_dir()]\n assert len(sub_dirs) == 1\n assert re.match(r\"[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}\", sub_dirs[0].name)", "def _create_result_directory(self):\n\t\tFileSystem.create_dir(self._result_directory_name)\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Log\")\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Dump\")", "def make_dirs():\n global paths_made\n\n #Have we done this already? 
Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def prerun(timestamp):\r\n if not os.path.isdir('log'):\r\n os.makedirs('log')\r\n if not os.path.isdir('collected'):\r\n os.makedirs('collected')\r\n if not os.path.isdir('done'):\r\n os.makedirs('done')\r\n time_stamped_folder = os.path.join('collected', timestamp)\r\n if not os.path.isdir(time_stamped_folder):\r\n os.makedirs(time_stamped_folder)\r\n return time_stamped_folder", "def _create_target_directories(self):\n if os.path.exists(self.PREPROCESSED_DATA_OUT_DIR):\n if self._hparams.over_write:\n print_info(\"Deleting data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n shutil.rmtree(self.PREPROCESSED_DATA_OUT_DIR)\n print_info(\"Recreating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)\n else:\n print_info(\"Skipping preprocessing step, since the data might already be available\")\n else:\n print_info(\"Creating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)", "def init_logging(input_file_parameters, dir_stacks):\r\n fl_name = '{0}_log_{1}_{2}.txt'.format(NAME,\r\n START_TIME,\r\n input_file_parameters.job_name)\r\n #NOTICE! Current_log_path.path is changed here!\r\n CurrentLogPath.path = os.path.join(input_file_parameters.output_dir,\r\n fl_name)\r\n logging.basicConfig(filename=CurrentLogPath.path, filemode='w',\r\n format='%(asctime)s %(levelname)s:%(message)s',\r\n level=logging.INFO)\r\n logging.info('{0} v. {1} started'.format(NAME, VERSION))\r\n logging.info('Job name: {0}'.format(input_file_parameters.job_name))\r\n logging.info('Starting point directory:\\n{0}'.format(dir_stacks[0]\r\n .path))\r\n logging.info('Output directory:\\n{0}'.format(input_file_parameters.output_dir))\r\n logging.info('-'*80)\r\n logging.info('staplefile contents:\\n{0}'.format('\\n'.join(input_file_parameters.staplefile)))\r\n logging.info('-'*80)\r\n logging.info('config.txt contents:\\n{0}'\r\n .format(utils.get_config_file()))\r\n logging.info('-'*80)", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' 
+ exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def setup(self, tmp_path):\n create_users_file(tmp_path)\n create_jobs_file(tmp_path)", "def _log_name(self, dir_name, job_name):\n base_dir = self.base_dir\n # Every counter is a file opened in append mode and closed\n # immediately to avoid race conditions in parallel computing: \n # file appends are atomic\n open(os.path.join(base_dir, 'log.current'), \n 'a').write('%s/%s\\n' % (dir_name, job_name))\n t = time.localtime()\n year_dir = os.path.join(base_dir, 'log.%i' % t.tm_year)\n try:\n os.mkdir(year_dir)\n except OSError:\n \"Dir exists\"\n month_dir = os.path.join(year_dir, '%02i' % t.tm_mon)\n try:\n os.mkdir(month_dir)\n except OSError:\n \"Dir exists\"\n open(os.path.join(month_dir, '%02i.log' % t.tm_mday), \n 'a').write('%s/%s\\n' % (dir_name, job_name))", "def setUp(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def set_log_dir(self, model_path=None):\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n# now = datetime.datetime.now()\n# \n# # If we have a model path with date and epochs use them\n# if model_path:\n# # Continue from we left of. Get epoch and date from the file name\n# # A sample model path might look like:\n# # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n# regex = r\".*/[\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n# m = re.match(regex, model_path)\n# if m:\n# now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n# int(m.group(4)), int(m.group(5)))\n# # Epoch number in file is 1-based, and in Keras code it's 0-based.\n# # So, adjust for that then increment by one to start from the next epoch\n# self.epoch = int(m.group(6)) - 1 + 1\n# print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \n \"siamese_{}_{}_{}\".format(self.config.MODEL.lower(), \n self.config.NAME.lower(), \n self.config.EXPERIMENT.lower()))\n\n # Create log_dir if not exists\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"siamese_mrcnn_*epoch*.h5\")\n self.checkpoint_path = self.checkpoint_path.replace(\"*epoch*\", \"{epoch:04d}\")", "def init_error_files(self): \n \n dir_path = self.init_logs_directory()\n log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)\n \n return log_errors", "def logdir(self):\n return osp.join('runs/', self.net_name, '')", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def _pre_create_runs_and_time_series(self):\n self._logdir_loader_pre_create.synchronize_runs()\n run_to_events = self._logdir_loader_pre_create.get_run_events()\n if self._run_name_prefix:\n run_to_events = {\n self._run_name_prefix + k: v for k, v in run_to_events.items()\n }\n\n run_names = []\n run_tag_name_to_time_series_proto = {}\n for (run_name, events) in run_to_events.items():\n run_names.append(run_name)\n for event in events:\n _filter_graph_defs(event)\n for value in event.summary.value:\n metadata, is_valid = self._request_sender.get_metadata_and_validate(\n run_name, value\n )\n if not is_valid:\n continue\n if metadata.data_class == summary_pb2.DATA_CLASS_SCALAR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_TENSOR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.TENSOR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_BLOB_SEQUENCE:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE\n )\n\n run_tag_name_to_time_series_proto[\n (run_name, value.tag)\n ] = tensorboard_time_series.TensorboardTimeSeries(\n display_name=value.tag,\n value_type=value_type,\n plugin_name=metadata.plugin_data.plugin_name,\n plugin_data=metadata.plugin_data.content,\n )\n\n self._one_platform_resource_manager.batch_create_runs(run_names)\n self._one_platform_resource_manager.batch_create_time_series(\n run_tag_name_to_time_series_proto\n )", "def _setup_dirs(\n self, save_dir: Path, log_dir: Path\n ) -> Tuple[SummaryWriter, SummaryWriter, Path]:\n save_dir = save_dir.expanduser()\n log_dir = log_dir.expanduser()\n\n # Log and save to a timestamped directory, since we don't want to\n # accidently overwrite older logs and models\n curr_date = datetime.now().astimezone()\n\n timestamped_log_dir = log_dir / curr_date.isoformat()\n try:\n timestamped_log_dir.mkdir(parents=True)\n except Exception:\n # had to add this case because the original isoformat\n # can't be used as dir_name in Windows\n timestamped_log_dir = log_dir / curr_date.isoformat().replace(\n \":\", \".\"\n )\n timestamped_log_dir.mkdir(parents=True)\n\n timestamped_save_dir = save_dir / curr_date.isoformat()\n timestamped_save_dir.mkdir(parents=True)\n\n # Save hyper-params as a TOML file for reference\n config = {**vars(self.config), \"date\": curr_date}\n for dest in timestamped_save_dir, timestamped_log_dir:\n with open(dest / self.CONFIG_NAME, \"w\") as f:\n toml.dump(config, f)\n\n # Use separate summary writers so that training and validation losses\n # can be viewed on the same graph in TensorBoard\n train_writer = SummaryWriter(str(timestamped_log_dir / \"training\"))\n val_writer = SummaryWriter(str(timestamped_log_dir / \"validation\"))\n\n return train_writer, val_writer, timestamped_save_dir", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n 
logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(f\"Failed to parse log line = {repr(line)}\")\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, f\"console-{idx}.log\"), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, f\"console-{idx}\")\n add_to_combined_list(console.log_calls, f\"console-{idx}\")\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(f\"[{e['time']}]{repr(e['text'])}\\r\\n\")\n else:\n clog.write(f\"{e['name']}: [{e['time']}] {repr(e['text'])}\\n\")\n except Exception as error:\n logger.error(error)\n logger.debug(f\"failed to parse line: {repr(e)}\")\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, f\"{test.__class__.__name__}.log\"), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def init() -> None:\n log_format = logging.Formatter(\"%(levelname)s || %(name)s || %(asctime)s || %(message)s\")\n\n log_file = Path(\"logs\", \"rl_snake.log\")\n log_file.parent.mkdir(exist_ok=True)\n\n file_handler = handlers.RotatingFileHandler(\n log_file,\n maxBytes=3000000,\n backupCount=5\n )\n file_handler.setFormatter(log_format)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG if constants.Misc.debug else logging.INFO)\n\n root_logger.info(\"Root logger initilised\")", "def start_check():\n if not os.path.exists(outfancy_temp_files):\n os.mkdir(outfancy_temp_files)\n if not 
os.path.exists(outfancy_temp_files + log_file):\n os.system('touch ' + outfancy_temp_files + log_file)", "def create_file_logs(config, board, tests_to_run, logger):\n combined_list = []\n\n def add_to_combined_list(log, name, combined_list=combined_list):\n for line in log.split(\"\\r\\n\"):\n try:\n if line == \"\":\n continue\n if line.startswith(\"\\n\"):\n line = line[1:]\n if line.startswith(\" [\"):\n line = line[1:]\n ts, text = line.split(\"]\", 1)\n timestamp = float(ts[1:-1])\n else:\n text = line\n timestamp = 0.0\n combined_list.append(\n {\"time\": timestamp, \"text\": str(text), \"name\": name}\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"Failed to parse log line = %s\" % repr(line))\n\n idx = 1\n console_combined = []\n for console in board.consoles:\n with open(os.path.join(config.output_dir, \"console-%s.log\" % idx), \"w\") as clog:\n clog.write(console.log)\n add_to_combined_list(console.log, \"console-%s\" % idx)\n add_to_combined_list(console.log_calls, \"console-%s\" % idx)\n add_to_combined_list(console.log, \"\", console_combined)\n idx = idx + 1\n\n def write_combined_log(combined_list, fname):\n with open(os.path.join(config.output_dir, fname), \"w\") as clog:\n for e in combined_list:\n try:\n if e[\"name\"] == \"\":\n clog.write(\"[%s]%s\\r\\n\" % (e[\"time\"], repr(e[\"text\"])))\n else:\n clog.write(\n \"%s: [%s] %s\\n\" % (e[\"name\"], e[\"time\"], repr(e[\"text\"]))\n )\n except Exception as error:\n logger.error(error)\n logger.debug(\"failed to parse line: %s\" % repr(e))\n\n import operator\n\n console_combined.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(console_combined, \"console-combined.log\")\n\n for device in config.devices:\n with open(os.path.join(config.output_dir, device + \".log\"), \"w\") as clog:\n d = getattr(config, device)\n if hasattr(d, \"log\"):\n clog.write(d.log)\n add_to_combined_list(d.log, device)\n add_to_combined_list(d.log_calls, device)\n\n for test in tests_to_run:\n if hasattr(test, \"log\") and test.log != \"\":\n with open(\n os.path.join(config.output_dir, \"%s.log\" % test.__class__.__name__), \"w\"\n ) as clog:\n clog.write(test.log)\n if hasattr(test, \"log_calls\"):\n add_to_combined_list(test.log_calls, test.__class__.__name__)\n\n combined_list.sort(key=operator.itemgetter(\"time\"))\n write_combined_log(combined_list, \"all.log\")", "def test_generate_logs(self):\n debug = True\n # Extra \"dummy\" files will be generated to the system to get the system to\n # the total_file_count\n total_file_count = 1020\n # Time in days to erase files\n erase_threshold = 7 \n filename_header = 'event-log-'\n log_directory = '/hd/logs'\n #log_directory = '/hdp/logs'\n\n \n \n password = get_hidden_password()\n self.ssx.open_hidden_shell(password)\n \n ###########\n ## Warning!\n ###########\n self.myLog.info(\"Warning:\")\n self.myLog.info(\"This automatoin makes use of tail and head\")\n self.myLog.info(\"Those binaries are not already on the SSX\")\n self.myLog.info(\"You need to copy both of them from the script directory to:\")\n self.myLog.info(\"/hd/logs otherwise the script will FAIL!\")\n \n #################################\n ## 1. 
Check the state of /hd/logs\n        #################################\n        self.myLog.info(\"Testing the %s location\" % log_directory)\n        self.myLog.info(\"--------------------------------------\")\n        ####################\n        ## Get the file list\n        \n        command = 'cd ' + log_directory\n        # This may cause a bug here!\n        #raw_output = self.ssx.hidden_cmd(\"cd \\/hd\\/logs\", timeout=30)\n        raw_output = self.ssx.hidden_cmd(command, timeout=30)\n        if debug:\n            print 'the return value was'\n            print raw_output\n\n        raw_output = self.ssx.hidden_cmd(\"pwd\", timeout=30)\n        if debug:\n            print 'the return value was'\n            print raw_output\n\n        raw_output = self.ssx.hidden_cmd(\"ls | grep event-log | wc\", timeout=30)\n        if debug:\n            print 'the return value was'\n            print raw_output\n\n        try:\n            raw_file_count = raw_output.split()\n            file_count = int(raw_file_count[0])\n        except:\n            self.myLog.error(\"Unable to parse the file count: %s\" % raw_file_count)\n        \n        # We have placed a copy of the following two tools onto the ssx in /hd/logs\n        # tail, head\n        # There is a bug in tail that causes it to not be able to read files\n        # via stdin. A workaround for this bug is as follows:\n        self.myLog.info(\"Executing workaround to make tail work\")\n        raw_output = self.ssx.hidden_cmd(\"touch stdin\", timeout=10)\n        \n        \n        self.myLog.info(\"Finding the oldest log file\")\n        raw_output = self.ssx.hidden_cmd(\"ls event-log* | /hd/logs/head -1\")\n        oldest_file = raw_output\n        self.myLog.info(\"The oldest log file is %s\" % oldest_file)\n        \n        # Newest is the file with the highest date. (means it was created recently)\n        self.myLog.info(\"Finding the newest file\")\n        raw_output = self.ssx.hidden_cmd(\"ls event-log* | /hd/logs/tail -1\")\n        newest_file = raw_output\n        self.myLog.info(\"The newest log file is %s\" % newest_file)\n        \n        \n        self.myLog.info(\"Checking the difference in date/time between the two logs\")\n        newest_log_parts = newest_file.split(\"-\")\n        newest_log_date = newest_log_parts[2]\n        newest_log_time = newest_log_parts[3]\n        newest_log_year = newest_log_date[0:4]\n        newest_log_month = newest_log_date[4:6]\n        newest_log_day = newest_log_date[6:8]\n        \n        oldest_log_parts = oldest_file.split(\"-\")\n        oldest_log_date = oldest_log_parts[2]\n        oldest_log_time = oldest_log_parts[3]\n        oldest_log_year = oldest_log_date[0:4]\n        oldest_log_month = oldest_log_date[4:6]\n        oldest_log_day = oldest_log_date[6:8]\n        \n        \n        self.myLog.info(\"Checking to see if the oldest file on the system 'should' be erased\")\n        # Now we have all the parts of the date/time we need to compare them to see\n        # if the time delta is greater than 7 days or some user defined value\n        should_be_errased = False\n        # If the years are different then erase\n        if newest_log_year > oldest_log_year:\n            self.myLog.info(\"files will be erased\")\n            should_be_errased = True\n        # If the day value is larger\n        elif newest_log_day > oldest_log_day:\n            if int(newest_log_day) - int(oldest_log_day) >= erase_threshold:\n                self.myLog.info(\"files will be erased\")\n                should_be_errased = True\n            else:\n                self.myLog.info(\"The oldest file is not old enough to be erased\")\n                self.myLog.info(\"The oldest file is not old enough to be erased\")\n        \n        self.myLog.info(\"------------------------------------------------------------------------\")\n        self.myLog.info(\"We will now generate files that are newer than the file erase threshold\")\n        self.myLog.info(\"This will cause ALL files that are newer than the date to be erased\")\n        \n\n        self.myLog.info(\"We found %s files in /hd/logs\" % file_count)\n        files_to_generate 
= total_file_count - file_count\n        self.myLog.info(\"For this test we will generate %s files\" % files_to_generate)\n        \n        \n        #file_date = int(raw_file_date[2]) - 1\n        if debug:\n            self.myLog.debug(\"These are the variables we will use:\")\n            self.myLog.debug(\"newest_log_year: %s\" % newest_log_year)\n            self.myLog.debug(\"newest_log_day: %s\" % newest_log_day)\n            self.myLog.debug(\"newest_log_month: %s\" % newest_log_month)\n            self.myLog.debug(\"erase_threshold: %s\" % erase_threshold)\n        file_date = str(newest_log_year) + str(newest_log_month)\n        self.myLog.debug(\"file_date: %s\" % file_date)\n        erase_day = int(newest_log_day) + int(erase_threshold) + 1\n        self.myLog.debug(\"erase_day: %s\" % erase_day)\n        file_date = file_date + str(erase_day)\n        self.myLog.debug(\"file_date: %s\" % file_date)\n        self.myLog.info(\"The fake log files will be generated with date: %s\" % file_date)\n        \n        \n        ##########################\n        ## Generate Fake log files\n        ##########################\n        \n        for i in range(0,files_to_generate):\n            if debug:\n                print '===================================='\n                print 'Working on file number:', i, 'of:', files_to_generate\n            complete_file_name = filename_header + str(file_date) + '-' + str.rjust(str(i), 6, '0')\n            if debug:\n                print 'The fake file will be called:', complete_file_name\n            \n            #command = 'cp ' + oldest_file + ' ' + complete_file_name\n            command = 'touch ' + complete_file_name\n            if debug:\n                print 'The command will be:'\n                print command\n            \n            raw_output = self.ssx.hidden_cmd(command)\n\n\n        self.myLog.info(\"Done generating fake log files\")\n        self.myLog.info(\"--------------------------------------\")\n        #self.ssx.hidden_cmd(\"\\n\")\n        self.ssx.close_hidden_shell()\n        \n        self.myLog.info(\"Checking to see the status of the log buffers\")\n        log_counters = show_logging(self.ssx)\n        self.myLog.info(\"The log files will be erased when:\")\n        self.myLog.info(\"%s = %s\" % (log_counters['Glob-D']['Next-Ix 1'], log_counters['Glob-D']['Next Save-Ix 1']))\n        log_events_to_generate = int(log_counters['Glob-D']['Next Save-Ix 1']) - int(log_counters['Glob-D']['Next-Ix 1'])\n        self.myLog.info(\"We need to generate %s log events to cause the log to flush to disk\" % log_events_to_generate)", "def run(self):\n        logger = self.logger\n        cwd = os.getcwd()\n        for step_name in self.steps_to_run:\n            step = self.steps[step_name]\n            if step.cached:\n                logger.info(' * Cached: {}'.format(step_name))\n                continue\n            step.config = self.config\n            new_log_file = self.new_step_log_file\n            if self.log_filename is not None:\n                step.log_filename = self.log_filename\n                do_local_logging = True\n            else:\n                # We only want to do local log output if the step output is\n                # being redirected to a file. 
Otherwise, we assume we're\n # probably just running one step and the local logging is\n # redundant and unnecessary\n do_local_logging = new_log_file\n\n if do_local_logging:\n logger.info(' * Running {}'.format(step_name))\n try:\n self._run_step(step, new_log_file)\n except BaseException:\n if do_local_logging:\n logger.info(' Failed')\n raise\n\n if do_local_logging:\n logger.info(' Complete')\n\n os.chdir(cwd)", "def init_log(path):\n file = open(path, 'w+')\n file.close()", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def makelog_and_prep_images():\n kai_util.makelog('../raw', instrument=osiris)\n\n # If you are reducing OSIRIS, you need to flip the images first. \n raw_files = glob.glob('../raw/i*.fits')\n osiris.flip_images(raw_files)\n\n # Download weather data we will need.\n dar.get_atm_conditions('2020')\n\n return", "def create_directories(working_dir, timesteps):\n\tfor time in timesteps:\n\t\tpath = working_dir + str(time) + '_ps_snapshot'\n\t\t\n\t\t# Make sure no old snapshot dir of same name exists - if it does, remove it.\n\t\tif os.path.isdir(path):\n\t\t\tprint('{} alread exists. Deleting it ...'.format(path))\n\t\t\tshutil.rmtree(path)\n\n\t\t# Copy the template\n\t\tshutil.copytree(working_dir + '.template_snapshot', path, symlinks=True)\n\t\tprint('{} initialized.'.format(path))\n\n\treturn path", "def init_config_vars(config):\r\n global run_id\r\n run_id = config.info.run_id\r\n global unique_op_dir\r\n unique_op_dir = os.path.join(config.info.output_dir, config.info.run_id)\r\n os.makedirs(unique_op_dir, exist_ok=True)", "def on_start(self):\n # self.login()\n\n # self.createfiles()", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n 
ensure_path(subdir_path)", "def export_log(self):\r\n if self.log[\"datetime\"] is not None and not self.log[\"datetime\"] == \"\":\r\n logs_dir = ''\r\n user = 'default'\r\n program_data = 'data\\program_data.json5'\r\n with open(program_data) as f:\r\n config = json.load(f)\r\n logs_dir = config.get(\"logs_records_path\", \"\")\r\n user = config.get(\"user\", \"default\")\r\n file_name = user+\" \"+self.log[\"datetime\"].replace(\"/\", \"\")\r\n file_name = file_name.replace(\" \", \"_\")\r\n file_name = file_name.replace(\":\", \"\")\r\n cwd = os.getcwd()\r\n if not logs_dir == \"\" and os.path.exists(logs_dir):\r\n if not user in os.listdir(logs_dir):\r\n os.makedirs(os.path.join(logs_dir, user))\r\n logs_dir = os.path.join(logs_dir, user)\r\n file_name = os.path.join(logs_dir, file_name)\r\n self.save_records(file_name)\r\n elif \"logs\" in os.listdir(cwd):\r\n folder = os.path.join(cwd, \"logs\")\r\n file_name = os.path.join(folder, file_name)\r\n self.save_records(file_name)\r\n self.reset_values()", "def save_run_data(path_to_dir, hp):\n print('Saving run data to: {}'.format(path_to_dir))\n if os.path.isdir(path_to_dir):\n print('Data already exists in this directory (presumably from a previous run)')\n inp = input('Enter \"y\" if you are sure you want to remove all the old contents: ')\n if inp == 'y':\n print('Removing old contents')\n shutil.rmtree(path_to_dir)\n else:\n print('Exiting')\n raise SystemExit\n print('Creating directory and saving data')\n os.mkdir(path_to_dir)\n\n # Redirect stdout (print statements) to file\n # if not hp.debug:\n # sys.stdout = FlushFile(open(os.path.join(path_to_dir, 'stdout.txt'), 'w'))\n\n # Save snapshot of code\n snapshot_dir = os.path.join(path_to_dir, 'code_snapshot')\n if os.path.exists(snapshot_dir): # shutil doesn't work if dest already exists\n shutil.rmtree(snapshot_dir)\n copy_tree_ignore_except('.', snapshot_dir)\n\n # Save hyperparms\n save_file(vars(hp), os.path.join(path_to_dir, 'hp.json'), verbose=True)\n\n # Save some command used to run, start time\n with open(os.path.join(path_to_dir, 'run_details.txt'), 'w') as f:\n f.write('Command:\\n')\n cmd = ' '.join(sys.argv)\n start_time = datetime.now().strftime('%B%d_%H-%M-%S')\n f.write(cmd + '\\n')\n f.write('Start time: {}'.format(start_time))\n print('Command used to start program:\\n', cmd)\n print('Start time: {}'.format(start_time))", "def preparation(self):\n # [1] Makes a dir for saving results.\n # if 'Result' dir already exists,\n # a 'temporary' dir will be made.\n\n try:\n os.mkdir(self.dir_for_saving_result)\n except FileExistsError:\n self.viewer.display_message(\"Made a temporary directory.\")\n self.dir_for_saving_result = 'temporary'\n os.mkdir('temporary')\n\n # [2] Copies config file into the same dir as the one where results will be stored\n shutil.copy2(self.config_file_name, self.dir_for_saving_result)", "def create_log(self, num_machines):\n\n # generates a folder for logs if one does not exist\n os.makedirs('logs', exist_ok=True)\n\n # record extra info at the top of the log file\n extra_info = [f'num machines: {num_machines}', f'ticks per second: {self.ticks_per_second}', f'lifetime: {self.lifetime}']\n dummy_info_dict = {k:info for k, info in zip(LogEntry.ENTRY_ORDER, extra_info)}\n\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n writer.writerow(dummy_info_dict)\n writer.writeheader()", "def main_storer():\n config.config_storer()\n create_storer_paths()\n 
create_storer_git_repo()\n create_db()\n _logger.info(' -- storer init done setting up paths and db file.')", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, 
self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def setup_files(args):\n postfix = 'reinforce'\n has_value_model = False\n if args.baseline:\n postfix = \"reinforce-baseline\"\n has_value_model = True\n elif args.actor_critic:\n postfix = \"actor-critic\"\n has_value_model = True\n elif args.a2c:\n postfix = \"a2c\"\n has_value_model = True\n elif args.random:\n postfix = \"random\"\n\n # create the folder for log files\n try:\n os.mkdir(postfix)\n except FileExistsError:\n print(postfix, \" folder exists\")\n\n fileid = \"%s-%d\" % (postfix, int(time.time()))\n actor_weights = \"actor_weights-%s.h5\" % fileid\n actor_weights = os.path.join(postfix, actor_weights)\n encoder_weights = \"encoder_weights-%s.h5\" % fileid\n encoder_weights = os.path.join(postfix, encoder_weights)\n value_weights = None\n if has_value_model:\n value_weights = \"value_weights-%s.h5\" % fileid\n value_weights = os.path.join(postfix, value_weights)\n\n outdir = \"/tmp/%s\" % postfix\n\n misc = (postfix, fileid, outdir, has_value_model)\n weights = (actor_weights, encoder_weights, value_weights)\n\n return weights, misc", "def set_model_for_train(self):\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n self.curr_folder = data_functions.create_path(\n self.save_path, self.train_time)\n logger.info(f\"training results will be stored in: {self.curr_folder}\")\n\n self.save_model_params()\n self.train_generator, self.val_generator = \\\n self.clarifruit_train_val_generators()\n keras_logs_path = self.set_model_checkpint()\n\n return keras_logs_path", "def rebuild_logs(tdir):\n # Remove the old log with no timestamps\n kstest_log = os.path.join(tdir.name, \"kstest.log\")\n os.unlink(kstest_log)\n\n # Find all the test's kstest.log files and append them to kstest.log\n with open(kstest_log, \"w\") as ksf:\n for log in glob(os.path.join(tdir.name, \"*\", \"kstest.log\")):\n with open(log) as f:\n data = f.read(1024**2)\n while data:\n ksf.write(data)\n data = f.read(1024**2)", "def first_time_run():\n print('First time run - initializing...')\n\n if not os.path.isdir(USER_PROG_HOME_DIR): # this will be used for both the INI file and BAT file\n os.makedirs(USER_PROG_HOME_DIR)\n\n con_tmp = open(CONFIG_TEMPLATE, 'r')\n con_new = open(CONFIG_FILENAME, 'w')\n line = con_tmp.readline() # skip over first two lines\n line = con_tmp.readline() # ...\n\n con_new.write('# ' + __PROGRAM_NAME__ + 'V%s' % __VERSION__ + ' Configuration File\\n')\n con_new.write('# Creation: ' + time.strftime('%m/%d/%y') + '\\n')\n\n con_new.write('[DEFAULT]\\n')\n con_new.write('ProgramName = ' + __PROGRAM_NAME__ + '\\n')\n #con_new.write('UserHome = ' + USER_HOME_DIR.replace('\\\\', '/') + '\\n')\n con_new.write('UserHome = ' + USER_HOME_DIR + '\\n')\n for line in con_tmp:\n con_new.write(line)\n con_tmp.close()\n con_new.close()\n\n print('Writing %s' % CONFIG_FILENAME)\n\n bat_new = open(BAT_FILENAME, 'w')\n bat_new.write('REM ' + __PROGRAM_NAME__ + 'V%s' % __VERSION__ + ' Program Launch File\\n')\n bat_new.write('REM Creation: ' + time.strftime('%m/%d/%y') + '\\n')\n bat_new.write('cd ' + PACKAGE_DIR + '\\n')\n bat_new.write('python ' + __PROGRAM_NAME__ + 
'.py ' + ' '.join(sys.argv[1:]) + '\\n')\n bat_new.close()\n\n print('Writing %s' % BAT_FILENAME)", "def analyse_screening_setup(self):\n\n control = self.control\n logger: LoggerProperties\n\n # Perform some input checks\n # Check project path exists\n if control.project_path == \"\":\n msg = \"Cannot process: Project location not set.\"\n raise LoggerWarning(msg)\n\n # Check at least one logger exists\n if not control.loggers:\n msg = \"Cannot process: No loggers exist in setup.\"\n raise LoggerWarning(msg)\n\n # Check all ids are unique\n control.check_logger_ids()\n\n # Check logging durations and sample lengths are positive\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n if logger.duration <= 0:\n msg = f\"Cannot process: Logging duration for logger {logger.logger_id} is {logger.duration}.\\n\"\n f\"Logging duration must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # TODO: Move to logger properties as a setup function\n if control.global_process_stats is True and logger.process_stats is True:\n if logger.stats_interval <= 0:\n msg = f\"Cannot process: Statistics sample length for logger \"\n f\"{logger.logger_id} is {logger.stats_interval}.\\n\"\n f\"Statistics sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n if control.global_process_spect is True and logger.process_spect is True:\n if logger.spect_interval <= 0:\n msg = f\"Cannot process: Spectral sample length for logger \"\n f\"{logger.logger_id} is {logger.spect_interval}.\\n\"\n f\"Spectral sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # Paths to output folders\n control.set_output_paths()\n\n # Get raw filenames, check timestamps and select files in processing datetime range\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n # Store logger filenames and check file timestamps\n self.statusbar.showMessage(\n f\"Checking setup: Checking file names for {logger.logger_id}. 
Please wait...\"\n )\n self.repaint()\n logger.get_filenames()\n\n # Select files to process and, if applicable, check file timestamps are valid\n logger.set_files_to_process()\n\n # Store expected file length\n logger.expected_data_points = logger.freq * logger.duration\n\n # Get all channel names and units if not already stored in logger object\n if len(logger.all_channel_names) == 0 and len(logger.all_channel_units) == 0:\n logger.get_all_columns()\n\n # Update column list in config dashboard if this logger is the one selected\n if logger.logger_id == self.inputDataModule.loggerList.currentItem().text():\n self.inputDataModule.set_logger_columns_list(logger)\n\n # Check requested channels exist\n # Connect warning signal to warning message box in DataLab class\n try:\n # Disconnect any existing connection to prevent repeated triggerings\n logger.logger_warning_signal.disconnect()\n except TypeError:\n pass\n logger.logger_warning_signal.connect(self.warning)\n\n # Set processed channel names and units as user values, if supplied, or file header values\n logger.set_selected_column_and_units_names()\n\n # Check for any columns without any units set and see if the units is embedded in the channel name;\n # if so extract units from channel name and add to units list\n logger.check_if_units_in_channel_name()\n\n # Check number of headers match number of columns to process\n # TODO: This should already have been enforced earlier so perhaps no longer required?\n logger.check_headers()", "def _SetProgramDir(self):\n p = self.params\n\n # Program dirs are where the summaries are written to.\n if p.task_name:\n program_dir_name = f'{p.task_name}_{p.name}_{p.dataset_name.lower()}'\n else:\n program_dir_name = f'{p.name}_{p.dataset_name.lower()}'\n self._program_dir = os.path.join(self._logdir, program_dir_name)\n\n pdir = epath.Path(self._program_dir)\n pdir.mkdir(parents=True, exist_ok=True)\n (pdir / 'params.txt').write_text(p.ToText())", "def init(ctx, project_root, mkdir, level, formatter, log_path):\n conf_content = get_tpl('logme', level=level, formatter=formatter, filename=log_path)\n\n config = get_config(conf_content)\n\n abs_path = Path(project_root).resolve()\n conf_location = abs_path.joinpath('logme.ini')\n\n if not abs_path.exists():\n if not mkdir:\n raise NotADirectoryError(f\"{abs_path.parent.resolve() / project_root} does not exist. 
If you'd \"\n f\"like to make the directory, please use '-mk' flag.\")\n else:\n abs_path.mkdir(parents=True, exist_ok=True)\n\n with conf_location.open('w') as conf:\n config.write(conf)", "def begin(self):\n\n env = self.context.lookup(\"/environment\")\n\n self._test_results_dir = env[\"output_directory\"]\n self._starttime = env[\"starttime\"]\n self._runid = env[\"runid\"]\n\n self._result_filename = os.path.join(self._test_results_dir, \"testrun_results.jsos\")\n self._summary_filename = os.path.join(self._test_results_dir, \"testrun_summary.json\")\n self._import_errors_filename = os.path.join(self._test_results_dir, \"import_errors.jsos\")\n\n return", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not exist before starting\n self.assertFalse(path.exists(log_file))\n\n cli_entry._validate_and_upload(directory_status, False)\n\n # Make sure log file is created\n self.assertTrue(path.exists(log_file))" ]
[ "0.7466747", "0.7157217", "0.70655835", "0.69699013", "0.69102144", "0.6875181", "0.6774886", "0.67428565", "0.6716854", "0.6689758", "0.66799164", "0.6649505", "0.6647565", "0.6583148", "0.6562383", "0.6534038", "0.6531846", "0.65123534", "0.6502982", "0.64945877", "0.64834994", "0.64663285", "0.6453367", "0.6436292", "0.6435524", "0.63895667", "0.637823", "0.6367656", "0.63618994", "0.63497543", "0.63406366", "0.63289", "0.6323452", "0.6296784", "0.62840384", "0.6268718", "0.62638736", "0.62343436", "0.62166697", "0.6193909", "0.6159205", "0.61286485", "0.60849714", "0.6084301", "0.6067813", "0.60623944", "0.60563093", "0.60502887", "0.6041974", "0.6030471", "0.60237986", "0.6015637", "0.60153264", "0.6013318", "0.60042965", "0.6002346", "0.599771", "0.5992168", "0.59918594", "0.59892535", "0.5979626", "0.5972177", "0.59639186", "0.59630656", "0.5954982", "0.59490454", "0.5938839", "0.5938645", "0.5918243", "0.5917884", "0.59123", "0.5891963", "0.5870059", "0.58542955", "0.5847409", "0.5842411", "0.5828564", "0.5820177", "0.5815213", "0.5807858", "0.57997304", "0.579923", "0.579695", "0.5793636", "0.57914674", "0.57903445", "0.57893676", "0.5782026", "0.57737064", "0.5772072", "0.57695395", "0.5769117", "0.57685935", "0.57640713", "0.5763709", "0.57600254", "0.57521904", "0.57505125", "0.5739292", "0.573552" ]
0.7053942
3
Sends the goal vehicle pose to the simulator
def pub_goal_vehicle_pose(self): header = Header() header.stamp = rospy.Time.now() position = Point(20.5, -10, -85) # position = Point(20.5, -10, -85) yaw = pi # Converting yaw to quaternion # See https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles # For better intuition about quaternions: https://eater.net/quaternions orientation = Quaternion(*rpy_to_quaterion(0, 0, yaw)) pose = Pose(position, orientation) pose_stamped_msg = PoseStamped(header, pose) self.pub_vehicle_pose.publish(pose_stamped_msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_destination(self):\n\n print('send the target to the robot')\n move_base_action_goal=MoveBaseActionGoal()\n move_base_action_goal.goal.target_pose.header.frame_id=\"map\"\n move_base_action_goal.goal.target_pose.pose.orientation.w=1\n move_base_action_goal.goal.target_pose.pose.position.x=self.x_des\n move_base_action_goal.goal.target_pose.pose.position.y=self.y_des\n print('des_x='+str(self.x_des))\n print('des_y='+str(self.y_des))\n self.des_pub.publish(move_base_action_goal)", "def sendPose(self,pose):\n self.broadcaster.sendPose(pose)", "def publish_goal(self, pose):\n # type: (Pose) -> None\n # Elias way\n # client = actionlib.SimpleActionClient('move_base', MoveBaseAction)\n # client.wait_for_server()\n # self.goal = MoveBaseGoal()\n # self.goal.target_pose.header.frame_id = \"map\"\n # self.goal.target_pose.header.stamp = rospy.Time.now()\n # self.goal.target_pose.pose = pose\n\n # client.send_goal(self.goal)\n # wait = client.wait_for_result()\n # if not wait: \n # rospy.logerr(\"Action server not available!\")\n # rospy.signal_shutdown(\"Action server not available!\")\n\n \n\n\n # arena-rosnav way\n print(\"test\")\n self._global_path = Path()\n self._old_global_path_timestamp = self._global_path.header.stamp\n goal = PoseStamped()\n goal.header.stamp = rospy.Time.now()\n goal.header.frame_id = \"map\"\n goal.pose = pose\n self._goal_pub.publish(goal)\n # added by Elias for communication with move_base\n #self.pub_mvb_goal.publish(goal)", "def move_robot(self, pose):\n # type: (Pose) -> None\n start_pos = ModelState()\n start_pos.model_name = 'turtlebot3'\n start_pos.pose = pose\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n resp = set_state(start_pos)\n\n except rospy.ServiceException:\n print(\"Move Robot to position failed\")\n\n pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size = 10)\n rospy.sleep(3)\n start_pos = PoseWithCovarianceStamped()\n start_pos.header.frame_id = 'map'\n start_pos.pose.pose = pose \n pub.publish(start_pos)", "def sendPose(self,pose):\n x,y,z,yaw,pitch,roll,ts = pose\n data = _RobotCommunicator.POSE_HEADER + \\\n pack(_RobotCommunicator.POSE_FORMAT,x,y,z,yaw,pitch,roll,ts)\n self.udpSock.sendto(data,self.addr)", "def test_vw_controller(self):\n pass\n\n yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, 
-math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()", "def goToPose(self,pose,speed=DEFAULT_SPEED):\n\n print(\"Moving to {}\".format(pose.description))\n gpio = pose.gpio\n if gpio != None:\n pin = gpio.get('pin',None)\n value = gpio.get('value',0)\n if pin != None:\n if type(pin) is str:\n self.robot.set_io({pin:value})\n elif pin != -1:\n GPIO.output(pin, {0:False,1:True}[value])\n self.waitFor(gpio.get('delay',2))\n return 'Set Io: '+str(pin)+' value: '+str(value)\n print(gpio)\n newSpeed = speed\n if pose.speed != None:\n newSpeed = pose.speed\n cmd = self.getMovementCommand(pose,speed=newSpeed)\n print(cmd)\n self.robot.play(cmd)\n self.waitForCompletion()\n print(\"Completed move\")", "def navToPose(goal):\n #compute angle required to make straight-line move to desired pose\n global xPosition\n global yPosition\n global theta\n #capture desired x and y positions\n desiredY = goal.pose.position.y\n desiredX = goal.pose.position.x\n #capture desired angle\n quat = goal.pose.orientation\n q = [quat.x, quat.y, quat.z, quat.w]\n roll, pitch, yaw = euler_from_quaternion(q)\n desiredT = yaw * (180.0/math.pi)\n #compute distance to target\n distance = math.sqrt(math.pow((desiredX - xPosition), 2) + math.pow((desiredY - yPosition), 2))\n adjustedX = goal.pose.position.x - xPosition\n adjustedY = goal.pose.position.y - yPosition\n print goal.pose.position.x, goal.pose.position.y\n print xPosition, yPosition\n print adjustedX, adjustedY\n #compute initial turn amount\n initialTurn = (math.atan2(adjustedY, adjustedX) * (180 / math.pi)) - theta\n\n print \"moving from (\" + str(xPosition) + \", \" + str(yPosition) + \") @ \" + str(theta) + \" degrees\"\n print \"moving to (\" + str(desiredX) + \", \" + str(desiredY) + \") @ \" + str(desiredT) + \" degrees\"\n print \"distance: \" + str(distance) + \", initial turn: \" + str(initialTurn)\n rotateDegrees(initialTurn)\n driveSmooth(0.25, distance)\n rospy.sleep(2)\n finalTurn = desiredT - theta\n rotateDegrees(finalTurn)", "def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = 
self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: {self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def p2p_drive_routine(self, goal_pos):\n\t\t(position, rotation) = self.get_odom() # get starting position values\n\n\t\tcurr_pose = self.call_jackal_pos_service(0) # don't drive, just get current lat/lon\n\n\t\tprint(\"Current position from pose server: 
{}\".format(curr_pose))\n\t\tprint(\"Positions attributes: {}\".format(dir(curr_pose)))\n\n\t\t_lat = curr_pose.jackal_fix.latitude\n\t\t_lon = curr_pose.jackal_fix.longitude\n\n\t\tprint(\"Jackal's current lat, lon: {}, {}\".format(_lat, _lon))\n\n\t\tcurr_pose_utm = utm.from_latlon(curr_pose.jackal_fix.latitude, curr_pose.jackal_fix.longitude)\n\n\t\tprint(\"Jackal's position in UTM: {}\".format(curr_pose_utm))\n\n\t\tA = (curr_pose_utm[0], curr_pose_utm[1], rotation)\n\t\tB = (goal_pos[0], goal_pos[1], rotation)\n\n\t\tx_diff = B[0] - A[0]\n\t\ty_diff = B[1] - A[1]\n\n\t\t_trans_angle = self.transform_imu_frame(degrees(A[2]))\n\t\tAB_theta0 = math.atan2(abs(y_diff), abs(x_diff)) # get intitial angle, pre transform\n\t\tAB_angle = self.transform_angle_by_quadrant(AB_theta0, x_diff, y_diff) # determine angle between vector A and B\n\t\tturn_angle = AB_angle - _trans_angle # angle to turn (signage should denote direction to turn)\n\n\t\tprint(\"Initial position and orientation: {}\".format(A))\n\t\tprint(\"Current angle in degrees: {}\".format(degrees(A[2])))\n\t\tprint(\"Transformed angle: {}\".format(_trans_angle))\n\t\tprint(\"AB initial angle: {}\".format(degrees(AB_theta0)))\n\t\tprint(\"AB angle after transform: {}\".format(AB_angle))\n\t\tprint(\"Calculated turning angle: {}\".format(turn_angle))\n\n\t\t# Determine angle to turn based on IMU..\n\t\tprint(\"Telling Jackal to turn {} degreess..\".format(turn_angle))\n\t\tself.call_jackal_rot_service(turn_angle)\n\t\tprint(\"Finished turning..\")\n\n\t\tdrive_distance = self.determine_drive_distance(A, B)\n\t\tprint(\"Driving Jackal {} meters..\".format(drive_distance))\n\t\tself.call_jackal_pos_service(drive_distance)\n\t\tprint(\"Finished driving..\")", "def execute(self, currentPose, targetPose):\n\n currentEuler = quaternion_to_euler(currentPose.pose.orientation.x,\n currentPose.pose.orientation.y,\n currentPose.pose.orientation.z,\n currentPose.pose.orientation.w)\n \n target_x = targetPose[0]\n target_y = targetPose[1]\n target_v = targetPose[2]\n #print(\"({},{},{})\".format(target_x, target_y, target_v))\n \n k_s = 0.1\n k_ds = 1\n k_n = 0.1\n\n #compute errors\n xError = (target_x - currentPose.pose.position.x) * np.cos(currentEuler[2]) + (target_y - currentPose.pose.position.y) * np.sin(currentEuler[2])\n yError = -(target_x - currentPose.pose.position.x) * np.sin(currentEuler[2]) + (target_y - currentPose.pose.position.y) * np.cos(currentEuler[2])\n curr_v = np.sqrt(currentPose.twist.linear.x**2 + currentPose.twist.linear.y**2)\n vError = target_v - curr_v\n \n delta = k_n*yError\n # Checking if the vehicle need to stop\n if target_v > 0:\n v = xError*k_s + vError*k_ds\n else:\n v = xError*k_s - 0.05*k_ds \n\n #print(\"{}, {}\".format(v, delta))\n #Send computed control input to vehicle\n newAckermannCmd = AckermannDrive()\n newAckermannCmd.speed = v\n newAckermannCmd.steering_angle = delta\n self.controlPub.publish(newAckermannCmd)", "def navToPose(self,goal):\n goalX=(goal.pose.position.x - self._current.position.x)\n goalY=(goal.pose.position.y - self._current.position.y)\n goalDistance=((goalX**2)+(goalY**2))**(.5)\n goalAngle=math.radians(math.atan2(goalY,goalX))\n\n self.rotate(goalAngle)\n time.sleep(2)\n self.driveStraight(1,goalDistance)", "def goToPose(self, pose):\n self.debug(\"Waiting for 'NavigateToPose' action server\")\n while not self.nav_to_pose_client.wait_for_server(timeout_sec=1.0):\n self.info(\"'NavigateToPose' action server not available, waiting...\")\n\n goal_msg = NavigateToPose.Goal()\n 
goal_msg.pose = pose\n\n self.info('Navigating to goal: ' + str(pose.pose.position.x) + ' ' +\n str(pose.pose.position.y) + '...')\n send_goal_future = self.nav_to_pose_client.send_goal_async(goal_msg,\n self._feedbackCallback)\n rclpy.spin_until_future_complete(self, send_goal_future)\n self.goal_handle = send_goal_future.result()\n\n if not self.goal_handle.accepted:\n self.error('Goal to ' + str(pose.pose.position.x) + ' ' +\n str(pose.pose.position.y) + ' was rejected!')\n return False\n\n self.result_future = self.goal_handle.get_result_async()\n return True", "def Run(self):\n # Waits until the action server has started up and started\n # listening for goals.\n self._MoveBaseClient.wait_for_server()\n rospy.loginfo(\"move_base is UP!\")\n\n goal = move_base_msgs.msg.MoveBaseGoal()\n # print(\"Empty goal:\")\n # print(goal)\n # Note that move_base will not go to an all zero target.\n\n # Grab a static copy of the current pose to work with\n # Otherwise it might change under our feet!\n \"\"\"\n Note, the actual pose on the map is not the same as this,\n but there is not map based pose.\n What there is the odometry based pose, and then a transform\n from the odometry to the map.\n Retriving the transform, combining it with the odom pose\n and making use of it is a future exercise.\n \"\"\"\n current_odom = self.currentOdom\n # print(\"Current odom:\")\n # print(current_odom)\n print(\"current_odom.pose:\")\n print(current_odom.pose)\n # rospy.Subscriber(\"cmd_vel\", Twist, self._HandleVelocityCommand)\n\n rosNow = rospy.Time.now()\n # we'll create a goal to send to move_base\n goal.target_pose.header.frame_id = \"base_link\"\n goal.target_pose.header.stamp = rosNow\n\n # This will move forward 1 meter from 0\n # goal.target_pose.pose.position.x = 0.0\n # goal.target_pose.pose.orientation.w = 1.0\n\n # Set new pose to same as current pose\n \"\"\"\n You have to set .position and .orientation,\n not .pose because the current_odom.pose\n includes covariance, the other cannot take\n \"\"\"\n goal.target_pose.pose.position = current_odom.pose.pose.position\n goal.target_pose.pose.orientation = current_odom.pose.pose.orientation\n \"\"\"\n If the odometry, which is tied to /base_link, was identical\n to the map location, this would tell it to go nowhere,\n but what we actually end up doing here is telling move_base\n to move the robot the difference between the odom (/base_link)\n and the map. :)\n \"\"\"\n \"\"\"\n a quick and easy way to get the transform from the /map to /base_link is to use the command-line tool:\n rosrun tf tf_echo /map /base_link\n So how do I combine this myself?\n \"\"\"\n\n # Rotate currentPose by 90 degrees\n quaternion_difference = tf2_ros.transformations.quaternion_about_axis(\n 0.123, (1, 0, 0)\n )\n # print(\"quaternion_difference:\")\n # print(quaternion_difference)\n\n print(\"Populated goal:\")\n print(goal.target_pose.pose)\n\n rospy.loginfo(\"Sending goal\")\n # Sends the goal to the action server.\n self._MoveBaseClient.send_goal(goal)\n\n rospy.loginfo(\"Waiting for response . . 
.\")\n # Waits for the server to finish performing the action.\n self._MoveBaseClient.wait_for_result()\n # This could wait a VERY long time,\n # if the move_base doesn't have a timeout it will never come back,\n # in most cases it does, but it seems in some cases it will retry forever.\n # http://docs.ros.org/api/actionlib/html/classactionlib_1_1simple__action__client_1_1SimpleActionClient.html#a460c9f52fd650f918cb287765f169445\n\n result = self._MoveBaseClient.get_result()\n # rospy.loginfo(result)\n result = self._MoveBaseClient.get_state()\n # rospy.loginfo(result)\n\n current_odom = self.currentOdom\n print(\"New odom:\")\n print(current_odom.pose)\n\n rospy.loginfo(\"Ok, now what?\")", "def _rviz_nav_goal_cb(self, msg):\n goal = Pose2D(x=msg.pose.position.x, y=msg.pose.position.y)\n tolerance = 0.0\n\n self.drive_to(goal, tolerance, avoid_targets=True, avoid_home=False)", "def move(goal_pose, has_orientation_constraint=False, do_precise_movement=False):\n if type(goal_pose) is dict:\n right_arm.set_joint_target_value(default_joints)\n else:\n right_arm.set_pose_target(goal_pose)\n right_arm.set_start_state_to_current_state()\n\n if (has_orientation_constraint):\n orien_const = OrientationConstraint()\n orien_const.link_name = \"right_gripper\";\n orien_const.header.frame_id = \"base\";\n orien_const.orientation= pose.orientation\n orien_const.absolute_x_axis_tolerance = 0.05;\n orien_const.absolute_y_axis_tolerance = 0.05;\n orien_const.absolute_z_axis_tolerance = 0.05;\n orien_const.weight = 1.0;\n consts = Constraints()\n consts.orientation_constraints = [orien_const]\n right_arm.set_path_constraints(consts)\n\n if do_precise_movement:\n right_arm.set_goal_position_tolerance(0.005) \n right_arm.set_max_velocity_scaling_factor(0.10) # make it slow\n right_arm.set_num_planning_attempts(10) # take best of 5 for accuracy of 5mm\n else:\n right_arm.set_max_velocity_scaling_factor(0.5) # make it slower\n right_arm.set_goal_position_tolerance(0.01) \n right_arm.set_num_planning_attempts(5) # take best of 3 for accuracy of 1 cm\n print pose\n\n\n right_plan = right_arm.plan()\n right_arm.execute(right_plan)", "def execute(self):\n self._odom_msg.header.stamp = rospy.Time.now()\n # query base state from robot and store in odom msg\n position, orientation, linear_velocity, angular_velocity = self._robot.get_base_state()\n [self._odom_msg.pose.pose.position.x,\n self._odom_msg.pose.pose.position.y,\n self._odom_msg.pose.pose.position.z] = position\n [self._odom_msg.pose.pose.orientation.x,\n self._odom_msg.pose.pose.orientation.y,\n self._odom_msg.pose.pose.orientation.z,\n self._odom_msg.pose.pose.orientation.w] = orientation\n [self._odom_msg.twist.twist.linear.x,\n self._odom_msg.twist.twist.linear.y,\n self._odom_msg.twist.twist.linear.z] = linear_velocity\n [self._odom_msg.twist.twist.angular.x,\n self._odom_msg.twist.twist.angular.y,\n self._odom_msg.twist.twist.angular.z] = angular_velocity\n self._publisher.publish(self._odom_msg)\n\n tf_msg = TransformStamped()\n tf_msg.header.frame_id = self._odom_msg.header.frame_id\n tf_msg.child_frame_id = self._odom_msg.child_frame_id\n tf_msg.transform.translation = self._odom_msg.pose.pose.position\n tf_msg.transform.rotation = self._odom_msg.pose.pose.orientation\n tf_msg.header.stamp = rospy.Time.now()\n self._br.sendTransform(tf_msg)", "def sendMouvementStep(positions):\n\tprogMode(True) # Active le couple des servos\n\tfor servo in positions: # Pour chaque servo\n\t\t# Ecrit la position dans le registre de chaque 
servo\n\t\taxDriver.setPosition(servo, positions[servo])\n\taxDriver.action(axDriver.BROADCASTID) # Tous les servos bougent", "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n if self.loop is True:\n ref_ind = ref_ind % traj_len\n else:\n if ref_ind > traj_len-1:\n ref_ind = traj_len-1\n if closest_ind == traj_len-1:\n self.at_dest = True\n else:\n ref_ind = closest_ind\n ref_state = self.np_trajectory[:, int(ref_ind)]\n\n # update vehicle state.\n '''if self.class_name == 'TruckVehicle':\n self.update_vehicle_state_qualisys()\n self.UDP_receive()\n if self.data == \"-1.00\":\n self.set_control_commands_pp(ref_state, ref_ind)\n else:\n steer = int(self.data[-6:-3])\n throttle = int(self.data[:-6]) + 5\n hw_port.set_command(throttle,steer,2)\n self.update_truck_hardware()\n else:\n self.set_control_commands(ref_state)\n self.update_vehicle_state()'''\n\n self.set_control_commands(ref_state, ref_ind)\n self.update_vehicle_state()\n\n # publish vehicle state.\n vehicle_state = msgs.VehicleState(self.vehicle_id, self.class_name,\n self.x, self.y, self.yaw, self.v)\n self.pub_state.publish(vehicle_state)\n self.update_current_node()\n\n #The way that the stop light waiting works, this is necessary\n if not self.waiting_at_stop:\n self.check_for_traffic_light()\n self.get_traffic()", "def execute(self):\n self.status_message = \"State: Execute - Executing Motion Plan\"\n self.current_state = \"execute\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"notp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n # TODO: Send the waypoints to the trajectory planner and break if estop\n if self.next_state == \"estop\":\n break\n self.rexarm.set_positions(full_wp)\n time.sleep(1.5)", "def send_proposes(self):\n neighbors = self.model.space.get_neighbors(self.pos, self.range, include_center=False)\n neighbors = list(filter(lambda x: x.type == 'guest', neighbors))\n\n if len(neighbors) > 0:\n options = list(map(lambda x: (x.role, self.action), neighbors))\n know = list(map(lambda x: self.knowledge[x], options))\n # print(\"Knowledges\", probs)\n probs = list(map(lambda x: np.exp(x), know))\n # print(\"Softmax\", probs)\n probs = list(map(lambda x: x / sum(probs), probs))\n # print(\"Normed\", probs)\n if len(neighbors) > 1:\n print(self.unique_id, neighbors, probs, know)\n\n other_agent = random.choices(neighbors, probs)[0]\n self.propose_interaction(other_agent, self.action)", "def send_scene_informations(self):\n self.send_player_position()\n self.send_player_direction()\n self.send_grafik_objects()", "def handle_pose(msg):\n global sensor_cfg\n global no_position\n global body_frame\n global frame_cfg\n\n quat = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])\n pos = np.array([msg.pose.position.x*1000, msg.pose.position.y*1000, msg.pose.position.z*1000])\n\n if position_mode == \"zero_pos\":\n pos = np.array([0, 0, 0])\n elif position_mode == \"relative\":\n pos = pos - parent_position\n\n br = tf.TransformBroadcaster()\n\n br.sendTransform(pos,\n quat,\n msg.header.stamp,\n body_frame,\n msg.header.frame_id)\n\n for k in frame_cfg:\n 
br.sendTransform(np.array([float(x) for x in frame_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in frame_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in sensor_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)\n\n for k in thruster_cfg:\n br.sendTransform(np.array([float(x) for x in sensor_cfg[k][\"position\"].split(\" \")]),\n np.array([float(x) for x in sensor_cfg[k][\"pose\"].split(\" \")]),\n rospy.Time.now(),\n k,\n body_frame)", "def go_to_pose(self, arg_pose, attempts=25):\n\n # Log initial pose\n pose_values = self.move_group.get_current_pose().pose\n rospy.loginfo('\\033[94m' + \">>> Current Pose:\" + '\\033[0m')\n rospy.loginfo(pose_values)\n\n # Set target pose\n self.move_group.set_pose_target(arg_pose)\n\n # Try multiple times to got to the target pose\n for _ in range(attempts):\n # Go to target pose (wait=False for Async Move)\n flag_plan = self.move_group.go(wait=True)\n\n # Break from loop if going to target pose was successful\n if flag_plan:\n break\n\n # Log final pose\n pose_values = self.move_group.get_current_pose().pose\n rospy.loginfo('\\033[94m' + \">>> Final Pose:\" + '\\033[0m')\n rospy.loginfo(pose_values)\n\n # Log the joint angles\n list_joint_values = self.move_group.get_current_joint_values()\n rospy.loginfo('\\033[94m' + \">>> Final Joint Values:\" + '\\033[0m')\n rospy.loginfo(list_joint_values)\n\n if flag_plan:\n rospy.loginfo('\\033[94m' + \">>> go_to_pose() Success\" + '\\033[0m')\n else:\n rospy.logerr(\n '\\033[94m' +\n \">>> go_to_pose() Failed. Solution for Pose not Found.\"\n + '\\033[0m')\n\n return flag_plan", "def predict(self):\n\n\t\t# Gets odometry for current move (velocity of robot and rotational velocity or robot)\n\t\tself.robot.get_odometry()\n\n\t\t# Updates matricies A, G, P\n\t\tself.robot.update_prediction_matrices()\n\t\t\n\t\t# Predicts position based on old position and odometry readings that are input into the system model\n\t\tself.robot.position = self.robot.position + self.robot.process_model\n\n\t\t# Creates Prediction Propogation matrix\n\t\tself.robot.P = np.dot(self.robot.A, np.dot(self.robot.P, self.robot.A.T)) + np.dot(self.robot.G, np.dot(self.robot.Q, self.robot.G.T))", "def cartesian_pose_client(position, orientation):\n action_address = '/' + prefix + 'driver/pose_action/tool_pose'\n client = actionlib.SimpleActionClient(action_address, kinova_msgs.msg.ArmPoseAction)\n client.wait_for_server()\n goal = kinova_msgs.msg.ArmPoseGoal()\n goal.pose.header = std_msgs.msg.Header(frame_id=(prefix + 'link_base'))\n goal.pose.pose.position = geometry_msgs.msg.Point(\n x=position[0], y=position[1], z=position[2])\n goal.pose.pose.orientation = geometry_msgs.msg.Quaternion(\n x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3])\n print('goal.pose in client 1: {}'.format(goal.pose.pose)) # debug\n client.send_goal(goal)\n if client.wait_for_result(rospy.Duration(200.0)):\n return client.get_result()\n else:\n client.cancel_all_goals()\n print(' the cartesian action timed-out')\n return None", "def move_to_hmi_pose(self):\n arm = self.get_arm(required_goals=['arm_out_of_way'])\n\n rotation = 1.57\n rotation_speed = 1\n rotation_duration = rotation / rotation_speed\n arm.send_joint_goal('arm_out_of_way', 0.0)\n self.base.force_drive(0, 0, rotation_speed, rotation_duration)\n arm.wait_for_motion_done()", 
"def pose_callback(msg):\n\t#Print the values of the x,y,theta of the Turtle:\n rospy.loginfo(\"x: %.11f, y: %.11f, theta: %.11f \", msg.x, msg.y, msg.theta)", "def publish(self):\n msg = JointState()\n msg.header.stamp = Time(seconds=self.__robot.getTime()).to_msg()\n msg.header.frame_id = self.__frame_id\n msg.name = [s + self.__joint_prefix for s in self.__joint_names]\n msg.position = []\n time_difference = self.__robot.getTime() - self.__previous_time\n for i in range(len(self.__sensors)):\n value = self.__sensors[i].getValue()\n msg.position.append(value)\n msg.velocity.append((value - self.__previous_position[i]) /\n time_difference if time_difference > 0 else 0.0)\n self.__previous_position[i] = value\n msg.effort = [0.0] * 6\n self.__publisher.publish(msg)\n self.__last_joint_states = msg\n self.__previous_time = self.__robot.getTime()", "def my_go_to_pose1(robot, x, y, angle_z):\n # Assuming positive y is to the robot's left and positive x is the direction the robot is already facing\n hypotenuse = numpy.sqrt(x*x + y*y)\n angle_offset_of_target_point = numpy.arcsin(y/hypotenuse)*180/math.pi\n my_turn_in_place(robot, angle_offset_of_target_point , 30)\n my_drive_straight(robot, hypotenuse, 50)\n my_turn_in_place(robot, angle_z-angle_offset_of_target_point, 30)\n time.sleep(1)", "def main():\n np.random.seed(219)\n rospy.init_node(\"sawyer_dagger_teacher\")\n pub_start = rospy.Publisher('/teacher/start', JointCommand, queue_size=1)\n pub_epi_fin = rospy.Publisher('/teacher/fin', JointCommand, queue_size=1)\n vel_ik_pos_pub = rospy.Publisher('/teacher/ik_vel/', Pose, queue_size = 3)\n pub3 = rospy.Publisher('/ddpg/vel_start/', Float64, queue_size=1)\n pub4 = rospy.Publisher('/ddpg/vel_end/', Float64, queue_size=1)\n goal_obs_pub = rospy.Publisher('/teacher/goal_obs/', Pose, queue_size=1)\n pos_cmd_pub = rospy.Publisher('/teacher/pos_cmd_pub/', PosCmd, queue_size=1)\n\n\n\n rospy.set_param('dagger_reset',\"false\") # param_name, param_value\n\n\n # Load Gazebo Models via Spawning Services\n # Note that the models reference is the /world frame\n # and the IK operates with respect to the /base frame\n # load_gazebo_models()\n # Remove models from the scene on shutdown\n rospy.on_shutdown(delete_gazebo_models)\n\n limb = 'right'\n hover_distance = 0.15 # meters\n # Starting Joint angles for right arm\n starting_joint_angles = {'right_j0': -0.041662954890248294,\n 'right_j1': -1.0258291091425074,\n 'right_j2': 0.0293680414401436,\n 'right_j3': 1.37518162913313,\n 'right_j4': -0.06703022873354225,\n 'right_j5': 0.7968371433926965,\n 'right_j6': 1.7659649178699421}\n\n pnp = PickAndPlace(limb, hover_distance)\n\n pnp.move_to_start(starting_joint_angles)\n\n \n # m_planner = trajectorySender()\n # An orientation for gripper fingers to be overhead and parallel to the obj\n overhead_orientation = Quaternion(\n x=-0.00142460053167,\n y=0.999994209902,\n z=-0.00177030764765,\n w=0.00253311793936)\n block_poses = list()\n # The Pose of the block in its initial location.\n # You may wish to replace these poses with estimates\n # from a perception node.\n block_poses.append(Pose(\n position=Point(x=0.45, y=0.155, z=-0.129),\n orientation=overhead_orientation))\n # Feel free to add additional desired poses for the object.\n # Each additional pose will get its own pick and place.\n block_poses.append(Pose(\n position=Point(x=0.6, y=-0.1, z=-0.129),\n orientation=overhead_orientation))\n # Move to the desired starting angles\n print(\"Running. 
Ctrl-c to quit\")\n # pnp.move_to_start(starting_joint_angles)\n idx = 0\n rate = rospy.Rate(1)\n block_quat_pose = [0.00142460053167,\n 0.999994209902,\n 0.00177030764765,\n 0.00253311793936]\n if rospy.has_param('vel_calc'):\n rospy.delete_param('vel_calc')\n load_gazebo_models()\n\n while not rospy.is_shutdown():\n\n\n starting_joint_angles['right_j0'] = np.random.uniform(-0.05, 0.05)\n starting_joint_angles['right_j1'] = np.random.uniform(-0.95, -0.85)\n starting_joint_angles['right_j2'] = np.random.uniform(-0.1, 0.1)\n starting_joint_angles['right_j3'] = np.random.uniform(1.6, 1.7)\n\n # starting_joint_angles['right_j0'] = np.random.uniform(-0.75, 0.75)\n # starting_joint_angles['right_j1'] = np.random.uniform(-0.97, -0.80)\n # starting_joint_angles['right_j2'] = np.random.uniform(-0.15, 0.15)\n # starting_joint_angles['right_j3'] = np.random.uniform(1.55, 1.75)\n\n start_pose = [starting_joint_angles['right_j0'], starting_joint_angles['right_j1'],\n starting_joint_angles['right_j2'], starting_joint_angles['right_j3'],\n starting_joint_angles['right_j4'], starting_joint_angles['right_j5'],\n starting_joint_angles['right_j6']]\n \n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('dagger_reset'):\n rospy.delete_param('dagger_reset')\n break\n pnp.move_to_start(starting_joint_angles)\n\n\n delete_kinect_camera()\n # delete_gazebo_models()\n delete_gazebo_block()\n rand_x = np.random.uniform(0.45, .75)\n rand_y = np.random.uniform(-0.2, 0.33)\n # rand_x = np.random.uniform(0.44,0.68)\n\n # rand_y = np.random.uniform(-0.20, 0.35)\n pose_block = Pose(position=Point(x=rand_x, y=rand_y, z=1.00)\n , orientation=overhead_orientation)\n pose_rob = Pose(position=Point(x=rand_x-0.015, y=rand_y+0.03, z=0.03), orientation=overhead_orientation) \n\n # rospy.set_param('vel_calc', 'true')\n # pnp.move_to_start(starting_joint_angles)\n # oktogo = pnp.move_to_start_vel_command(start_pose)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # loads env\n load_gazebo_block(block_pose=pose_block)\n # load_kinect_camera()\n\n \n\n # rospy.set_param('vel_calc', 'true')\n print 'Reaching target object... Learning...'\n rospy.set_param('epi_start', 'true')\n pnp.reach(pose_rob, pos_cmd_pub)\n # reached = pnp.reach_vel_ctrl(pose_rob)\n rospy.sleep(0.5)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # if reached:\n # rospy.set_param('reached', 'true')\n # goal_obs_pub.publish(pose_rob)\n\n\n print 'Reached target object! 
and Goal obs acquired Resetting...'\n # while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n # if rospy.has_param('demo_success'):\n # break\n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('demo_success'):\n rospy.delete_param('demo_success')\n break\n \n # rospy.delete_param('demo_success')\n \n\n return 0", "def main():\n\n\t# Initialising ROS node\n\trospy.init_node(\"turtlebot_move\")\n\n\t# Reading parameters from the launch file\n\tnpy_path = rospy.get_param(\"/publish_velocity/npy_file_path\")\n\n\t# Reading the generated A* path from the .npy file\n\t# rospack = rospkg.RosPack()\n\t# npy_path = os.path.join(rospack.get_path('turtlebot_astar'), 'src/path_dumps/path_final.npy')\n\trobot_path_list = np.load(npy_path, allow_pickle=True)\n\n\tglobal goal\n\tgoal.x, goal.y = robot_path_list[0].getXYCoords()\n\n\t# Creating the Publisher and the Subscriber\n\tpub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n\tsub = rospy.Subscriber(\"/odom\", Odometry, newOdom, (robot_path_list, pub))\n\n\tr = rospy.Rate(4)\n\tspeed = Twist()\n\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\n\t\t\tinc_x = goal.x - x\n\t\t\tinc_y = goal.y - y\n\n\t\t\tangle_to_goal = math.atan2(inc_y, inc_x)\n\n\t\t\tif abs(angle_to_goal - theta) < 0.1:\n\t\t\t\tspeed.linear.x = 0.5\n\t\t\t\tspeed.angular.z = 0.0\n\t\t\telif (angle_to_goal - theta) < 0:\n\t\t\t\tspeed.linear.x = 0.0\n\t\t\t\tspeed.angular.z = -0.3\n\t\t\telse:\n\t\t\t\tspeed.linear.x = 0.0\n\t\t\t\tspeed.angular.z = 0.3\n\n\t\t\t# Publishing the Velocity Inputs for the TurtleBot on the topic /cmd_vel\n\t\t\tpub.publish(speed)\n\t\t\tr.sleep()\n\n\texcept rospy.exceptions.ROSInterruptException as ros_int:\n\t\tprint(ros_int)\n\texcept Exception as e:\n\t\traise e", "def perform(self, reevaluate=False):\n goal_position = self.blackboard.world_model.get_detection_based_goal_position_uv()\n if not goal_position:\n return\n\n ball_u, ball_v, ball_frame = self.blackboard.world_model.get_ball_position_uv_ball_approach_frame()\n goal_u, goal_v, goal_frame = self.blackboard.world_model.get_detection_based_goal_position_uv_ball_approach_frame()\n point = (ball_u, ball_v, goal_u, goal_v)\n\n if not ball_frame == goal_frame:\n return\n\n pose_msg = PoseStamped()\n pose_msg.header.stamp = rospy.Time.now()\n pose_msg.header.frame_id = ball_frame\n\n # ball position\n pose_msg.pose.position = Point(point[0], point[1], 0)\n\n rotation = quaternion_from_euler(0, 0, 45) # (hopefully) 45 degrees to the left\n pose_msg.pose.orientation = Quaternion(*rotation)\n pose_msg.pose.position.x -= 0.2 # 20 cm before the ball\n pose_msg.pose.position.y -= 0.2 # 20 cm to the right of the ball\n\n self.blackboard.pathfinding.publish(pose_msg)", "def go_to_pose(self, arg_pose):\n\n pose_values = self._group.get_current_pose().pose\n self._group.set_pose_target(arg_pose)\n flag_plan = self._group.go(wait=True) # wait=False for Async Move\n pose_values = self._group.get_current_pose().pose\n list_joint_values = self._group.get_current_joint_values()\n if (flag_plan == True):\n rospy.loginfo('\\033[94m' + \">>> go_to_pose() Success\" + '\\033[0m')\n else:\n rospy.logerr(\n '\\033[94m' + \">>> go_to_pose() Failed. 
Solution for Pose not Found.\" + '\\033[0m')\n\n return flag_plan", "def move2goal(self):\n \n global points, point, point_old, distance_tolerance, trigger, start\n\n goal_pose = Pose()\n\n # Get the input from the user.\n goal_pose.x = points[point][0] # float(input(\"Set your x goal: \"))\n goal_pose.y = points[point][1] # float(input(\"Set your y goal: \"))\n\n vel_msg = Twist()\n\n data = [['nameservers','panel'], ['nameservers','panel']]\n\n file_name2 = \"/home/kmro/wr_ws/src/zad2_package/short_distances/distances-p%d\" % point\n short_distances = open(file_name2, \"w\")\n \n file_name1 = \"/home/kmro/wr_ws/src/zad2_package/distances/distances-p%d\" % point\n all_distances_file = open(file_name1, \"w\")\n\n val = \"dx%d\\t\\t\" % (point-1) + \"dy%d\\t\\t\" % (point-1) + \"dx%d\\t\\t\" % point + \"dy%d\\n\" % point \n short_distances.write(str(val))\n\n val = \"dx\\t\\tdy\"\n for i in range(22):\n val = val + \"\\t\\tdx%d\\t\\t\" % i + \"dy%d\" % i \n all_distances_file.write(str(val))\n\n while self.euclidean_distance(goal_pose) >= distance_tolerance:\n\n # Porportional controller.\n # https://en.wikipedia.org/wiki/Proportional_control\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n\n # Print results to files\n if point_old != point:\n print(\"point = \", point)\n point_old = point\n if point > 0:\n val = \"{:.3f}\\t\".format(points[point-1][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point-1][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n if trigger == True:\n smallest_distances.append(((points[point-1][0] - self.pose.x)**2 + (points[point-1][1] - self.pose.y)**2)**0.5)\n trigger = False\n short_distances.write(\"\\n\")\n\n val = \"{:.3f}\\t\".format(goal_pose.x - self.pose.x)\n all_distances_file.write(str(val))\n val = \"{:.3f}\\t\".format(goal_pose.y - self.pose.y)\n all_distances_file.write(str(val))\n for i in range(1,len(points)):\n val = \"{:.3f}\\t\".format(points[i-1][0] - self.pose.x)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[i-1][1] - self.pose.y)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n all_distances_file.write(\"\\n\")\n\n # Publish at the desired rate.\n self.rate.sleep()\n \n short_distances.close()\n all_distances_file.close()\n\n # If it was not the last goal, then move to the second one\n if point < len(points) - 1:\n trigger = True\n point = point + 1\n goal_pose.x = points[point][0]\n goal_pose.y = points[point][1]\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n self.move2goal()\n # Stopping our robot after the movement is over.\n else:\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n suma = 0\n i = 0\n for j in smallest_distances:\n print(\"p%d: \" % i , \"%.3f error\" % j)\n i = i + 1\n print(\"error_sum(22) = %.3f\" % sum(smallest_distances))\n end = time.time()\n 
print(\"Elapsed time: \", end - start)\n exit()\n \n point = point + 1\n self.velocity_publisher.publish(vel_msg)\n\n # If we press control + C, the node will stop.\n rospy.spin()", "def current_pose_cb(self, msg):\n\n # Save the current vehicle pose\n self.pose = msg", "def current_pose_cb(self, msg):\n\n # Save the current vehicle pose\n self.pose = msg", "def update_robot_pose(self):\n # first make sure that the particle weights are normalized\n self.normalize_particles()\n\n x = 0\n y = 0\n theta = 0\n angles = []\n for particle in self.particle_cloud:\n x += particle.x * particle.w\n y += particle.y * particle.w\n v = [particle.w * math.cos(math.radians(particle.theta)),\n particle.w * math.sin(math.radians(particle.theta))]\n angles.append(v)\n theta = sum_vectors(angles)\n orientation_tuple = tf.transformations.quaternion_from_euler(0, 0, theta)\n self.robot_pose = Pose(position=Point(x=x, y=y),\n orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1],\n z=orientation_tuple[2], w=orientation_tuple[3]))", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def record_pose(self):\n drone_pos = client.getMultirotorState().kinematics_estimated.position\n output = \" | \".join([\"x_val: \" + str(drone_pos.x_val), \"y_val: \" + str(drone_pos.y_val), \"z_val: \" + str(drone_pos.z_val)])\n print(output)\n self.log_arr.append(output)\n return output", "def goal_callback(self, pose: PoseStamped) -> None:\n\n # Update existing path\n #\n if self.soccerbot.robot_path is not None:\n print(\"Updating New Goal\")\n start = time.time()\n goal_position = Transformation(pose=pose.pose)\n self.soccerbot.setWalkingTorsoHeight(goal_position)\n self.new_path = copy.deepcopy(self.soccerbot.robot_path)\n\n try:\n self.t_new_path = self.new_path.dynamicallyUpdateGoalPosition(self.t, goal_position)\n except Exception as ex:\n print(ex)\n return\n\n end = time.time()\n\n self.soccerbot.publishPath(self.new_path)\n print(\"New Goal Updated, Time Taken: \", end - start)\n pass\n self.new_goal = pose", "def my_go_to_pose1(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the my_drive_straight and my_turn_in_place functions. 
This should\n\t# include a sequence of turning in place, moving straight, and then turning\n\t# again at the target to get to the desired rotation (Approach 1).\n\t# ####\n\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\tfirstRotation = firstRotationInRadians * 360.0/ (2.0 * math.pi)\n\tmy_turn_in_place(robot, firstRotation, 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_drive_straight(robot, math.sqrt(x*x + y*y), (-1 if x < 0 else 1) * 30)\n\trobot.stop_all_motors()\n\t# robot.drive_wheels(0, 0, duration=1)\n\t# time.sleep(1)\n\tmy_turn_in_place(robot, angle_z - firstRotation , 30)\n\ttime.sleep(1)", "def get_goal_ee_pose(self):\n #self.target_endpoint = #magic tf call that I can add ie the pose of the palm from camera aruco detection\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame_camera', rospy.Time()) # ee_frame_camera_flipped\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.target_endpoint = np.array(point)\n # rospy.logerr(self.target_endpoint)", "def cozmo_go_to_pose(robot, x, y, angle_z):\n\trobot.go_to_pose(Pose(x, y, 0, angle_z=degrees(angle_z)), relative_to_robot=True).wait_for_completed()", "def set_pose(self, desired_motor_angles, duration):\n if duration <= 1:\n raise ValueError(\n 'The set pose duration of {}s is too short and unsafe.'.format(\n duration))\n\n assert len(desired_motor_angles) == self.num_motors\n print(\"enter set pose\")\n\n # Get an initial state.\n self._get_state()\n initial_motor_angles = self.motor_angles\n\n sequence_start_time = self._clock()\n time_since_start = 0\n while time_since_start < duration:\n progress = time_since_start / duration\n # Use cos to create a soft acceleration/deceleration profile.\n progress = 0.5 * (1 - math.cos(progress * math.pi))\n assert 0 <= progress <= 1\n target_motor_angles = []\n for motor_id, desired_motor_angle in enumerate(\n desired_motor_angles, start=0):\n target_motor_angles.append((1 - progress) *\n initial_motor_angles[motor_id] +\n progress * desired_motor_angle)\n\n self.apply_action(target_motor_angles,\n robot_config.MotorControlMode.POSITION)\n\n # TODO(tingnan): This sleep is probably not needed?\n time.sleep(0.002)\n time_since_start = self._clock() - sequence_start_time", "def cozmo_go_to_pose(robot, x, y, angle_z):\n robot.go_to_pose(Pose(x, y, 0, angle_z=degrees(angle_z)), relative_to_robot=True).wait_for_completed()", "def publish_pose(self):\n pose_msg = PoseWithCovarianceStamped()\n pose_msg.header.stamp = self.current_frame.time\n pose_msg.header.frame_id = \"map\"\n pose_msg.pose.pose = g2r(self.current_frame.pose3)\n\n cov = 1e-4 * np.identity(6, np.float32)\n # FIXME Use cov in current_frame\n cov[np.ix_((0, 1, 5), (0, 1, 5))] = self.current_keyframe.transf_cov\n pose_msg.pose.covariance = cov.ravel().tolist()\n self.pose_pub.publish(pose_msg)\n\n o2m = self.current_frame.pose3.compose(self.current_frame.dr_pose3.inverse())\n o2m = g2r(o2m)\n p = o2m.position\n q = o2m.orientation\n self.tf.sendTransform(\n (p.x, p.y, p.z),\n [q.x, q.y, q.z, q.w],\n self.current_frame.time,\n \"odom\",\n \"map\",\n )\n\n odom_msg = Odometry()\n odom_msg.header = pose_msg.header\n odom_msg.pose.pose = pose_msg.pose.pose\n odom_msg.child_frame_id = \"base_link\"\n odom_msg.twist.twist = 
self.current_frame.twist\n self.odom_pub.publish(odom_msg)", "def Run(self):\n while not rospy.is_shutdown():\n # get input of destination\n if (self.target_x is None) or (self.target_y is None):\n self.target_x, self.target_y = self.GetTarget()\n # if the destination is not in x >= 0 and y <= 11, ask user to re-input\n # till the right values found\n if (self.target_x < 0 or self.target_x > 10) or (self.target_y < -10 or self.target_y > 10):\n print(\"-------------------------------------------------------\") \n print(\"WARNING: Invalid Input, please reinput the destination.\")\n print(\"-------------------------------------------------------\") \n self.target_x = None\n self.target_y = None\n else:\n print(\"Current Destination: [{}, {}]\".format(self.target_x, self.target_y))\n else:\n ################################################################################################\n # get all necessary parameters\n goal = np.array([self.target_x, self.target_y])\n robot_pos = self.GetRobotInfo()\n ranges, angles = self.GetObstaclePos()\n\n if (ranges is not None) and (angles is not None):\n ctrl = TangentBug(self.Lidar.range_max)\n # obsts = ctrl.Continuity(ranges, angles, robot_pos[:2])\n # print(len(obsts))\n linear, omega = ctrl.MotionToGo(ranges, angles, goal, robot_pos)\n print(\"=======================================\")\n # print([linear, omega])\n else:\n linear = 0.\n omega = 0.\n print(\"---------------------------------------\")\n print(\"NO OBSTACLE DETECTED.\")\n print(\"---------------------------------------\")\n\n ################################################################################################\n self.vel.linear.x = linear \n self.vel.angular.z = omega\n self.pub.publish(self.vel)\n\n # sleep till the next commend sent\n self.rate.sleep()", "def my_go_to_pose2(robot, x, y, angle_z):\n\t# ####\n\t# TODO: Implement a function that makes the robot move to a desired pose\n\t# using the robot.drive_wheels() function to jointly move and rotate the \n\t# robot to reduce distance between current and desired pose (Approach 2).\n\t# ####\n\t\n\tabsoluteTargetPosition = (robot.pose.position.x + x, robot.pose.position.y + y, robot.pose.rotation.angle_z.degrees + angle_z)\n\t\n\twhile(math.sqrt(x*x + y*y) > 50.0):\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\tfirstRotationInRadians = (0 if y == 0 else 90) if x == 0 else math.atan(y/x)\n\t\tleftMotor = 10 * (2 * x - angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\trightMotor = 10 * (2 * x + angle_z * (2 * math.pi / 360.0) * get_distance_between_wheels() * math.cos(firstRotationInRadians)) / (2 * get_front_wheel_radius() * math.cos(firstRotationInRadians))\n\t\t# print(\"(leftMotor, rightMotor) = (%i,%i)\" % (leftMotor, rightMotor))\n\t\tangle_delta = get_front_wheel_radius() * (rightMotor - leftMotor) / get_distance_between_wheels()\n\t\tx_delta = get_front_wheel_radius() * math.cos(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\ty_delta = get_front_wheel_radius() * math.sin(angle_z * 2.0 * math.pi / 360.0) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"angle_delta %i\" % angle_delta)\n\t\t# x = x - get_front_wheel_radius() * math.cos(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# y = y - get_front_wheel_radius() * math.sin(angle_delta) * (leftMotor + rightMotor) / 2.0\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\t# 
angle_z = angle_z + angle_delta * (360.0/(2 * math.pi))\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.drive_wheels(leftMotor, rightMotor, duration = 1)\n\t\trobot.stop_all_motors()\n\t\t# time.sleep(1)\n\t\tx = absoluteTargetPosition[0] - robot.pose.position.x\n\t\ty = absoluteTargetPosition[1] - robot.pose.position.y\n\t\tangle_z = absoluteTargetPosition[2] - robot.pose.rotation.angle_z.degrees\n\t\t# print(\"(x, y, angle_z) = (%i,%i,%i)\" % (x, y, angle_z))\n\t\trobot.stop_all_motors()\n\t\t# robot.drive_wheels(0,0)", "def record(self):\n\n if self.selected_point is None:\n RosProxy().notify(\"No calibration point selected\", STATE.ERROR)\n return\n\n if len(self.poses) == 0:\n RosProxy().notify(\"No calibration point added\", STATE.ERROR)\n return\n\n if RobotControllerHandler.current_state is None:\n RosProxy().notify(\"The current robot state is not available\", STATE.ERROR)\n return\n\n robot_pose = RobotControllerHandler.current_state.pose\n\n # pos = robot_pose.position\n # orientation = robot_pose.orientation\n # axes = 'sxyz'\n # (rx, ry, rz) = tf.transformations.euler_from_quaternion(np.array(\n # [orientation.x, orientation.y, orientation.z, orientation.w]), axes)\n\n self.poses[self.selected_point].measured = robot_pose\n self.calibration_changed()\n\n # logdebug(\n # \"Recording pose (x,y,z,r,p,y): %d,%d,%d,%d,%d,%d\" %\n # (pos.x *\n # 1000,\n # pos.y *\n # 1000,\n # pos.z *\n # 1000,\n # math.degrees(rx),\n # math.degrees(ry),\n # math.degrees(rz)))", "def get_goal_pose(self,pose=[0,0,0]):\n\t\treturn pose", "def my_go_to_pose2(robot, x, y, angle_z):\n # ####\n # TODO: Implement a function that makes the robot move to a desired pose\n # using the robot.drive_wheels() function to jointly move and rotate the \n # robot to reduce distance between current and desired pose (Approach 2).\n # ####\n pass", "def test_move_pose(self):\n\n global sendPlayCallParams\n \n req = self.get_moves(1)\n \n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n \n\n with patch.object(UdpConnector, 'appendToQueue') as u:\n \n src.drivers.hyundai_robot.move_pose(req)\n\n assert u.call_count == 1\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()", "def move_robot(request):\n\n phase_id = request.phase\n print \"phase_id is {}\".format(phase_id)\n if phase_id == 0:\n success = move_to_marshmallow()\n elif phase_id == 1:\n success = move_to_mouth()\n elif phase_id == 2:\n success = release_marshmallow()\n elif phase_id == 3:\n success = grip_marshmallow()\n elif phase_id == 4:\n success = move_to_start_state()\n elif phase_id == 5:\n success = perform_full_sequence()\n message = \"placeholder\"\n\n return TriggerPhaseResponse(success, message)", "def execute(self, userdata):\n\n global x_home\n global y_home\n\n rospy.loginfo(rospy.get_caller_id() + 'Executing state SLEEP ')\n ## Setting the goal home position\n goal = exp_assignment2.msg.PlanningGoal()\n goal.target_pose_robot.pose.position.x = x_home\n goal.target_pose_robot.pose.position.y = y_home\n rospy.loginfo(rospy.get_caller_id() + 'Back home x: %d y: %d',x_home,y_home)\n client.send_goal(goal) \n client.wait_for_result()\n rospy.loginfo('i m arrived, now i will take a nap')\n time.sleep(3)\n self.rate.sleep()\n return 'GoToNormal'", "def _go_to_goal(self, new_goal):\n\n # Waits until the action server has started up and started\n # 
listening for goals.\n rospy.loginfo(\"Waiting for move_base to come up . . . \")\n self._MoveBaseClient.wait_for_server()\n rospy.loginfo(\"move_base is UP!\")\n\n # Wait for tf_listener to be ready.\n # NOTE: I'm not sure this is required anymore or not\n # If you call self.tf_listener too soon it has no data in the listener buffer!\n # http://answers.ros.org/question/164911/move_base-and-extrapolation-errors-into-the-future/\n # We could put a static delay in here, but this is faster.\n rospy.loginfo(\"Waiting for tf_listener to be ready . . . \")\n tf_listener_ready = False\n # http://wiki.ros.org/tf2/Tutorials/Writing%20a%20tf2%20listener%20%28Python%29\n while not tf_listener_ready:\n try:\n self.tf_Buffer.lookup_transform(\"map\", \"base_link\", rospy.Time())\n tf_listener_ready = True\n except tf2_ros.ExtrapolationException:\n rospy.loginfo(\"tf_listener not ready . . . \")\n rospy.sleep(0.1)\n rospy.loginfo(\"tf_listener READY!\")\n\n # Create a variable to hold our goal\n goal = move_base_msgs.msg.MoveBaseGoal()\n # Note that move_base will not go to an all zero target.\n\n # we'll create a goal to send to move_base\n # If you are just sending commands to the robot with no map use base_link\n # goal.target_pose.header.frame_id = \"base_link\"\n # But if you have SLAM or Localization active and are using a map, you need to use the map!\n goal.target_pose.header.frame_id = \"map\"\n\n goal.target_pose.pose = new_goal.pose\n\n rospy.loginfo(\"Populated goal.\")\n\n #######################################\n # This is the part that sends the goal!#\n #######################################\n\n rospy.loginfo(\"Sending goal\")\n # Sends the goal to the action server.\n result = -1\n timeoutSeconds = 60 # TODO: Should this be sent as part of the call?\n if not rospy.is_shutdown():\n self._MoveBaseClient.cancel_goals_at_and_before_time(rospy.Time.now())\n # NOTE: Do not use cancel_all_goals here as it can cancel future goals sometimes!\n goal.target_pose.header.stamp = rospy.Time.now()\n if not rospy.is_shutdown():\n self._MoveBaseClient.send_goal(goal)\n count = 0\n finished = False\n # Wait for action to finish or timeout to run out\n # Use double timeout, but cancel if timeout is met\n while (\n count < (timeoutSeconds * 2)\n and not finished\n and not rospy.is_shutdown()\n ):\n if count > timeoutSeconds:\n finished = True\n rospy.loginfo(\n \"Time-out reached while attempting to reach goal, canceling!\"\n )\n # NOTE: If the robot tends to get stuck without moving at all,\n # 1. Subscribe to cmd_vel\n # 2. Increment a timer.\n # 3. Zero it out whenever cmd_vel is updated.\n # 4. Cancel this if the timer gets too high.\n count += 1\n rospy.sleep(\n 1\n ) # Set this delay as you see fit. 
If the robot is extremely fast this could be slowing you down!\n result = self._MoveBaseClient.get_state()\n resultText = \"\"\n # http://docs.ros.org/indigo/api/actionlib_msgs/html/msg/GoalStatus.html\n if result == GoalStatus.PENDING:\n resultText = \"PENDING\"\n if result == GoalStatus.ACTIVE:\n resultText = \"ACTIVE\"\n if result == GoalStatus.PREEMPTED:\n finished = True\n resultText = \"PREEMPTED\"\n if result == GoalStatus.SUCCEEDED:\n finished = True\n resultText = \"SUCCEEDED\"\n if result == GoalStatus.ABORTED:\n finished = True\n resultText = \"ABORTED\"\n if result == GoalStatus.REJECTED:\n finished = True\n resultText = \"REJECTED\"\n if result == GoalStatus.PREEMPTING:\n resultText = \"PREEMPTING\"\n if result == GoalStatus.RECALLING:\n resultText = \"RECALLING\"\n if result == GoalStatus.RECALLED:\n finished = True\n resultText = \"RECALLED\"\n if result == GoalStatus.LOST:\n finished = True\n resultText = \"LOST\"\n rospy.loginfo(\n \"Pending result:\"\n + str(result)\n + \" \"\n + resultText\n + \" Time-out in :\"\n + str(timeoutSeconds - count)\n )\n # If it was determined that we are \"finished\" then cancel\n # any pending goals right now, because the loop will not\n # repeat.\n if finished:\n # NOTE: Do not use cancel_all_goals here as it can cancel future goals sometimes!\n self._MoveBaseClient.cancel_goal()\n\n trans = self.tf_Buffer.lookup_transform(\"map\", \"base_link\", rospy.Time())\n\n rospy.loginfo(\"New Position: \")\n rospy.loginfo(str(trans.transform.translation))\n rospy.loginfo(\" New Orientation: \")\n rospy.loginfo(str(trans.transform.rotation))\n\n if result == GoalStatus.SUCCEEDED:\n return True\n else:\n return False", "def test_guarded_approach_pose(self):\n # Note: This has to match the .test file\n desired_twist = geometry_msgs.msg.TwistStamped()\n desired_twist.header.frame_id = 'arm_link_5'\n desired_twist.twist.linear.z = 1.0\n\n current_joints = sensor_msgs.msg.JointState()\n current_joints.name = ['arm_joint_2', 'arm_joint_3', 'arm_joint_4']\n current_joints.position = [0.2, 1.8, 1.7]\n current_joints.effort = [0.2, 1.8, 1.7]\n\n while not self.wait_for_result:\n self.pub_current_joints.publish(current_joints)\n self.event_out.publish('e_start')\n\n self.assertEqual(self.result.header.frame_id, desired_twist.header.frame_id)\n self.assertEqual(\n self.result.twist, desired_twist.twist,\n msg=\"Result: {0}\\nDesired: {1}\".format(self.result, desired_twist)\n )\n\n self.result = None\n self.wait_for_result = None\n\n while not self.wait_for_result:\n self.event_out.publish('e_collision')\n\n # the twist should be zero after a collision is detected\n desired_twist.twist.linear.z = 0.0\n self.assertEqual(\n self.result.twist, desired_twist.twist,\n msg=\"Result: {0}\\nDesired: {1}\".format(self.result, desired_twist)\n )", "def move_to(self, x_pos, y_pos, z_pos):\n def ik_angles(X_Pos,Y_Pos,Z_Pos,Roll,Pitch,Yaw):\n \"\"\"\n Compute the joint angles needed to place the robot arm in a given pose.\n \"\"\"\n limb_side = 'left'\n ns = \"ExternalTools/\" + limb_side + \"/PositionKinematicsNode/IKService\"\n iksvc = rospy.ServiceProxy(ns, SolvePositionIK)\n ikreq = SolvePositionIKRequest()\n hdr = Header(stamp=rospy.Time.now(), frame_id='base')\n quat = tf.transformations.quaternion_from_euler(float(Roll),float(Pitch),float(Yaw))\n poses = {\n 'left': PoseStamped(\n header=hdr,\n pose=Pose(\n position=Point(\n\t\t x=float(X_Pos),\n y=float(Y_Pos),\n z=float(Z_Pos),\n ),\n orientation=Quaternion(\n\t\t x = quat[0],\n\t\t y = quat[1],\n\t\t z = 
quat[2],\n\t\t w = quat[3],\n\t\t )\n )\n )\n }\n\n ikreq.pose_stamp.append(poses[limb_side])\n try:\n rospy.wait_for_service(ns, 5.0)\n resp = iksvc(ikreq)\n except (rospy.ServiceException, rospy.ROSException), e:\n rospy.logerr(\"Service call failed: %s\" % (e,))\n return 1\n\n # Check if result valid, and type of seed ultimately used to get solution\n # convert rospy's string representation of uint8[]'s to int's\n resp_seeds = struct.unpack('<%dB' % len(resp.result_type),\n resp.result_type)\n if (resp_seeds[0] != resp.RESULT_INVALID):\n # Format solution into Limb API-compatible dictionary\n limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))\n return limb_joints \n\n else:\n print(\"INVALID POSE - No Valid Joint Solution Found.\")\n\n return 0\n \n roll = 0\n pitch = 3.14\n yaw = 0 #controls roll of gripper\n\n #compute required joint angles\n angles = ik_angles(x_pos,y_pos,z_pos,roll,pitch,yaw)\n\n #move left limb to position\n limb = baxter_interface.Limb('left')\n limb.move_to_joint_positions(angles)\n \n #update current position\n self.x = x_pos\n self.y = y_pos\n self.z = z_pos\n \n return [x_pos, y_pos]", "def save_poses():\n get_marshmallow_pose(should_remember=True)\n get_mouth_pose(should_remember=True)\n rospy.sleep(1)\n print \"Finished saving poses\"", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def _on_pose(self, msg):\n if self._history_length == Dashboard.POSE_MAX_TIMESTEPS:\n self._pose_history[:, :-1] = self._pose_history[:, 1:]\n else:\n self._history_length += 1\n\n self._pose_history[:, self._history_length-1] = [\n rospy.Time.now().to_time() % 1000,\n msg.x,\n msg.y,\n msg.theta,\n msg.linear_velocity,\n msg.angular_velocity,\n ]", "def cartesian_pose_client(position, orientation):\n action_address = '/' + 'jaco' + '_arm_driver/arm_pose/arm_pose'\n client = actionlib.SimpleActionClient(action_address, jaco_msgs.msg.ArmPoseAction)\n client.wait_for_server()\n\n goal = jaco_msgs.msg.ArmPoseGoal()\n goal.pose.header = std_msgs.msg.Header(frame_id=('jaco' + '_api_origin'))\n goal.pose.pose.position = geometry_msgs.msg.Point(\n x=position[0], y=position[1], z=position[2])\n goal.pose.pose.orientation = geometry_msgs.msg.Quaternion(\n x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3])\n\n client.send_goal(goal)\n\n if client.wait_for_result(rospy.Duration(10.0)):\n return client.get_result()\n else:\n client.cancel_all_goals()\n print(' the cartesian action timed-out')\n return None", "def goToMultiPose(self,poses,speed=DEFAULT_SPEED):\n cmds = []\n for p in poses:\n print(\"Moving to {}\".format(p.name))\n gpio = p.gpio\n if gpio != None:\n pin = gpio.get('pin',None)\n value = gpio.get('value',0)\n if pin != None:\n if type(pin) is str:\n self.robot.set_io({pin:value})\n elif pin != -1:\n GPIO.output(pin, {0:False,1:True}[value])\n self.waitFor(gpio.get('delay',2))\n print('Set Io: '+str(pin)+' value: '+str(value))\n continue\n \n #cmds.append(self.getMovementCommand(p,speed=speed))\n newSpeed = speed\n if p.speed != None:\n newSpeed = p.speed\n cmd = self.getMovementCommand(p,speed=newSpeed)\n self.robot.play(cmd)\n 
self.waitForCompletion()\n '''\n if len(cmds)>0:\n self.robot.play(cmds)\n self.waitForCompletion()\n print(\"Completed move\")\n '''", "def move(self):\n # moving each of the obstacles\n time = rospy.get_rostime()\n self.vel_msg1.linear.x = math.sin(time.to_sec())\n self.pub1.publish(self.vel_msg1)\n self.vel_msg2.linear.x = math.cos(1.2 * time.to_sec())\n self.pub2.publish(self.vel_msg2)\n self.vel_msg3.linear.x = math.cos(2 * time.to_sec())\n self.pub3.publish(self.vel_msg3)\n self.vel_msg4.linear.x = math.sin(0.85 * time.to_sec())\n self.pub4.publish(self.vel_msg4)", "def update(self):\n self.logger.debug(\" %s [GenerateNextPose::update()]\" % self.name)\n\n # This behavior will always be successfull. But if it that was not the\n # case, it would return failure\n # self.feedback_message = \"Some failure message!\"\n # return py_trees.common.Status.FAILURE\n\n # If the behavior could be unning for a while, we would have to return\n # py_trees.common.Status.RUNNING, and not block its execution.\n\n # In this example we just need to create the position and orientation\n # keys corresponding to the next desired pose.\n self.curr_waypoint = (self.curr_waypoint + 1) % len(self.waypoints)\n self.blackboard.set(self.pos_key,\n action.Move2Pos.Goal(target_position=Point(\n x=self.waypoints[self.curr_waypoint].x,\n y=self.waypoints[self.curr_waypoint].y,\n z=0.0)))\n self.blackboard.set(\n self.angle_key,\n action.Rotate2Angle.Goal(\n target_orientation=self.waypoints[self.curr_waypoint].theta))\n self.feedback_message = \"New position and orientation generated!\"\n return py_trees.common.Status.SUCCESS", "def pose_cb(self, msg):\n self.current_pose_g = msg\n self.enu_2_local()\n\n q0, q1, q2, q3 = (\n self.current_pose_g.pose.pose.orientation.w,\n self.current_pose_g.pose.pose.orientation.x,\n self.current_pose_g.pose.pose.orientation.y,\n self.current_pose_g.pose.pose.orientation.z,\n )\n\n psi = atan2((2 * (q0 * q3 + q1 * q2)),\n (1 - 2 * (pow(q2, 2) + pow(q3, 2))))\n\n self.current_heading_g = degrees(psi) - self.local_offset_g", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def execute_tp(self):\n self.status_message = \"State: Execute TP- Executing Motion Plan with trajectory planner\"\n self.current_state = \"execute_tp\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"tp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n self.tp.set_initial_wp()\n self.tp.set_final_wp(full_wp)\n\n if self.next_state == \"estop\":\n break\n # TODO: Set the positions and break if estop is needed\n self.tp.go()\n # self.rexarm.set_positions(wp)\n # time.sleep(1.5)", "def __init__(self, robot_pose, robot_info):\n K3Supervisor.__init__(self, robot_pose, robot_info)\n\n # The maximal distance to an obstacle (inexact)\n self.distmax = robot_info.ir_sensors.rmax + robot_info.wheels.base_length/2\n\n # Fill in some parameters\n self.parameters.sensor_poses = robot_info.ir_sensors.poses[:]\n self.parameters.ir_max = robot_info.ir_sensors.rmax\n self.parameters.direction = 'left'\n self.parameters.distance = self.distmax*0.85\n \n self.process_state_info(robot_info)\n \n #Add controllers\n self.gtg = self.create_controller('GoToGoal', self.parameters)\n self.avoidobstacles = self.create_controller('AvoidObstacles', self.parameters)\n self.wall = 
self.create_controller('FollowWall', self.parameters)\n self.hold = self.create_controller('Hold', None)\n \n # Define transitions\n self.add_controller(self.hold,\n (lambda: not self.at_goal(), self.gtg))\n self.add_controller(self.gtg,\n (self.at_goal, self.hold),\n (self.at_wall, self.wall))\n self.add_controller(self.wall,\n (self.at_goal,self.hold),\n (self.unsafe, self.avoidobstacles),\n (self.wall_cleared, self.gtg))\n self.add_controller(self.avoidobstacles,\n (self.at_goal, self.hold),\n (self.safe, self.wall))\n\n # Start in the 'go-to-goal' state\n self.current = self.gtg", "def test_model(sess, X, policy, game):\n\n\t# four character code object for video writer\n\tex = cv2.VideoWriter_fourcc('M','J','P','G')\n\t# video writer object\n\tout = cv2.VideoWriter(\"./test/pg_agent_{}.avi\".format(time.ctime()), ex, 5.0, (160, 210))\n\t\n\tmodel_point, computer_point = 0, 0\n\tcur_frame = skip_initial_frames(game)\n\t# Stacking 4 frames to capture motion\n\tcur_state = stack((cur_frame, cur_frame, cur_frame, cur_frame), axis=3)\n\tcur_state = reshape(cur_state, (80, 80, 4))\n\n\t# Game begins\n\twhile model_point < 13 and computer_point < 13:\n\t\tcur_policy = sess.run(policy, feed_dict = {X : reshape(cur_state, (1, 80, 80, 4))})\n\t\taction = 2 if cur_policy[0][0] >= 0.5 else 3\n\t\tnext_frame, reward = game.play(action)\n\t\t# write frame to video writer\n\t\tout.write(next_frame)\n\t\tif reward != 0:\n\t\t\tif reward == -1:\n\t\t\t\tcomputer_point += 1\n\t\t\telse:\n\t\t\t\tmodel_point += 1\n\t\t\tcur_frame = skip_initial_frames(game)\n\t\t\tcur_state = stack((cur_frame, cur_frame, cur_frame, cur_frame), axis=3)\n\t\t\tcur_state = reshape(cur_state, (80, 80, 4))\n\t\telse:\n\t\t\tcur_state = append(preprocess_frame(next_frame), cur_state[:, :, 0:3], axis=2)\n\n\tout.release()", "def move(self, theta, phi):\n print('Writing the theta angle: {}'.format(str(theta)))\n self.servo.write(theta)\n #print(\"Serial Port: \" + str(self.servo.read()))\n print('Writing the theta angle: {}'.format(str(phi)))\n self.servo.write(phi)\n #print(\"Serial Port: \" + str(self.servo.read()))", "def run_agent(self):\n do_plot = False\n\n # -- Load and init the Helper mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(\n self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml, reward_goal, reward_intermediate, n_intermediate_rewards, reward_timeout, reward_sendcommand, timeout = init_mission(\n self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type, self.mission_seed,\n self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n\n # -- Define local capabilities of the agent (sensors)--#\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.LATEST_FRAME_ONLY)\n self.agent_host.setRewardsPolicy(MalmoPython.RewardsPolicy.KEEP_ALL_REWARDS)\n\n time.sleep(1)\n\n # -- Get the state of the world along with internal agent state...--#\n state_t = self.agent_host.getWorldState()\n\n # -- Get a state-space model by observing the Orcale/GridObserver--#\n if state_t.is_mission_running:\n # -- Make sure we look in the right direction when observing the surrounding (otherwise the coordinate system will rotated by the Yaw !) --#\n # Look East (towards +x (east) and +z (south) on the right, i.e. 
a std x,y coordinate system) yaw=-90\n self.agent_host.sendCommand(\"setPitch 20\")\n time.sleep(1)\n self.agent_host.sendCommand(\"setYaw -90\")\n time.sleep(1)\n\n # -- Basic map --#\n state_t = self.agent_host.getWorldState()\n\n if state_t.number_of_observations_since_last_state > 0:\n msg = state_t.observations[-1].text # Get the details for the last observed state\n oracle_and_internal = json.loads(msg) # Parse the Oracle JSON\n grid = oracle_and_internal.get(u'grid', 0)\n xpos = oracle_and_internal.get(u'XPos', 0)\n zpos = oracle_and_internal.get(u'ZPos', 0)\n ypos = oracle_and_internal.get(u'YPos', 0)\n yaw = oracle_and_internal.get(u'Yaw', 0)\n pitch = oracle_and_internal.get(u'Pitch', 0)\n\n # -- Parste the JOSN string, Note there are better ways of doing this! --#\n full_state_map_raw = str(grid)\n full_state_map_raw = full_state_map_raw.replace(\"[\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"]\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"u'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\" \", \"\")\n aa = full_state_map_raw.split(\",\")\n vocs = list(set(aa))\n for word in vocs:\n for i in range(0, len(aa)):\n if aa[i] == word:\n aa[i] = vocs.index(word)\n\n X = np.asarray(aa);\n nn = int(math.sqrt(X.size))\n X = np.reshape(X, [nn, nn]) # Note: this matrix/table is index as z,x\n\n # -- Visualize the discrete state-space --#\n if do_plot:\n print yaw\n plt.figure(1)\n imgplot = plt.imshow(X.astype('float'), interpolation='none')\n plt.pause(4)\n # plt.show()\n\n # -- Define the unique states available --#\n state_wall = vocs.index(\"stained_hardened_clay\")\n state_impossible = vocs.index(\"stone\")\n state_initial = vocs.index(\"emerald_block\")\n state_goal = vocs.index(\"redstone_block\")\n\n # -- Extract state-space --#\n offset_x = 100 - math.floor(xpos);\n offset_z = 100 - math.floor(zpos);\n\n state_space_locations = {}; # create a dict\n\n for i_z in range(0, len(X)):\n for j_x in range(0, len(X)):\n if X[i_z, j_x] != state_impossible and X[i_z, j_x] != state_wall:\n state_id = \"S_\" + str(int(j_x - offset_x)) + \"_\" + str(int(i_z - offset_z))\n state_space_locations[state_id] = (int(j_x - offset_x), int(i_z - offset_z))\n if X[i_z, j_x] == state_initial:\n state_initial_id = state_id\n loc_start = state_space_locations[state_id]\n elif X[i_z, j_x] == state_goal:\n state_goal_id = state_id\n loc_goal = state_space_locations[state_id]\n\n # -- Generate state / action list --#\n # First define the set of actions in the defined coordinate system \n actions = {\"west\": [-1, 0], \"east\": [+1, 0], \"north\": [0, -1], \"south\": [0, +1]}\n state_space_actions = {}\n for state_id in state_space_locations:\n possible_states = {}\n for action in actions:\n # -- Check if a specific action is possible --#\n delta = actions.get(action)\n state_loc = state_space_locations.get(state_id)\n state_loc_post_action = [state_loc[0] + delta[0], state_loc[1] + delta[1]]\n\n # -- Check if the new possible state is in the state_space, i.e., is accessible --#\n state_id_post_action = \"S_\" + str(state_loc_post_action[0]) + \"_\" + str(\n state_loc_post_action[1])\n if state_space_locations.get(state_id_post_action) != None:\n possible_states[state_id_post_action] = 1\n\n # -- Add the possible actions for this state to the global dict --#\n state_space_actions[state_id] = possible_states\n\n # -- Kill the agent/mission --#\n agent_host.sendCommand(\"tp \" + str(0) + \" \" + str(0) + 
\" \" + str(0))\n time.sleep(2)\n\n # -- Save the info an instance of the StateSpace class --\n self.state_space.state_actions = state_space_actions\n self.state_space.state_locations = state_space_locations\n self.state_space.start_id = state_initial_id\n self.state_space.start_loc = loc_start\n self.state_space.goal_id = state_goal_id\n self.state_space.goal_loc = loc_goal\n\n # -- Reward location and values --#\n # OPTIONAL: If you want to account for the intermediate rewards \n # in the Random/Simple agent (or in your analysis) you can \n # obtain ground-truth by teleporting with the tp command \n # to all states and detect whether you recieve recieve a \n # diamond or not using the inventory field in the oracle variable \n #\n # As default the state_space_rewards is just set to contain \n # the goal state which is found above.\n # \n state_space_rewards = {}\n state_space_rewards[state_goal_id] = reward_goal\n\n # HINT: You can insert your own code for getting \n # the location of the intermediate rewards\n # and populate the state_space_rewards dict \n # with more information (optional). \n # WARNING: This is a bit tricky, please consult tutors before starting\n\n # -- Set the values in the state_space container --#\n self.state_space.reward_states = state_space_rewards\n self.state_space.reward_states_n = n_intermediate_rewards + 1\n self.state_space.reward_timeout = reward_timeout\n self.state_space.timeout = timeout\n self.state_space.reward_sendcommand = reward_sendcommand\n else:\n self.state_space = None\n # -- End if observations --#\n\n return", "def PlanToEndEffectorPose(self, robot, goal_pose, **kw_args):\n from prpy.planning.exceptions import CollisionPlanningError\n from prpy.planning.exceptions import SelfCollisionPlanningError\n\n ikp = openravepy.IkParameterizationType\n ikfo = openravepy.IkFilterOptions\n\n # Find an IK solution. OpenRAVE tries to return a solution that is\n # close to the configuration of the arm, so we don't need to do any\n # custom IK ranking.\n manipulator = robot.GetActiveManipulator()\n ik_param = openravepy.IkParameterization(goal_pose, ikp.Transform6D)\n ik_solution = manipulator.FindIKSolution(\n ik_param, ikfo.CheckEnvCollisions,\n ikreturn=False, releasegil=True\n )\n\n if ik_solution is None:\n # FindIKSolutions is slower than FindIKSolution,\n # so call this only to identify and raise error when\n # there is no solution\n ik_solutions = manipulator.FindIKSolutions(\n ik_param, ikfo.IgnoreSelfCollisions,\n ikreturn=False, releasegil=True)\n\n for q in ik_solutions:\n robot.SetActiveDOFValues(q)\n report = openravepy.CollisionReport()\n if self.env.CheckCollision(robot, report=report):\n raise CollisionPlanningError.FromReport(report, deterministic=True)\n elif robot.CheckSelfCollision(report=report):\n raise SelfCollisionPlanningError.FromReport(report, deterministic=True)\n\n raise PlanningError(\n 'There is no IK solution at the goal pose.', deterministic=True)\n\n return self._Snap(robot, ik_solution, **kw_args)", "def send_ned_velocity(uvwLoc):\n vel = 5. * 2. 
# m/s at average cdf=.5\n m = norm(7.8, 15.22).cdf(np.sum(uvwLoc**2.))\n uvwLocNorm = -(uvwLoc / np.abs(uvwLoc).max())\n uvwVel = m * vel * uvwLocNorm\n velocity_x, velocity_y, velocity_z = uvwVel[1], uvwVel[0], uvwVel[2]\n rospy.logdebug(\"{} m={} mXv={} uvw={} norm={} vel={}\".format(tag, m, m*vel, uvwLoc, uvwLocNorm, velocity_x, velocity_y, velocity_z))\n if vehicle.mode.name != \"GUIDED\":\n rospy.logerr(\"{} send_ned_ve not in Guided mode {}\".format(tag, vehicle.mode.name))\n return\n\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n 0b0000111111000111, # type_mask (only speeds enabled)\n 0, 0, 0, # x, y, z positions (not used)\n velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n # send command to vehicle on 1 Hz cycle\n vehicle.send_mavlink(msg)\n # for _ in range(2):\n # vehicle.send_mavlink(msg)\n # rospy.sleep(rospy.Duration.from_sec(.15))", "def advance_simulation(self):\n ts = self.sim.getTimeStep()\n old_rpos = self.sim.getAgentPosition(self.robot_num)\n # Move all the agents towards their goals\n for agent in self.agents:\n p = self.sim.getAgentPosition(agent)\n g = self.goals[agent]\n vec = ((g[0] - p[0])/ts, (g[1] - p[1])/ts)\n self.sim.setAgentPrefVelocity(agent, vec)\n self.sim.doStep()\n # Check and see if the robot is in collision and reset to old position\n # if so (backtrack on this step)\n # for agent in self.agents:\n # if (agent != self.robot_num\n # and utils.dist(self.sim.getAgentPosition(self.robot_num),\n # self.sim.getAgentPosition(agent)) < (\n # self.sim.getAgentRadius(self.robot_num) +\n # self.sim.getAgentRadius(agent))):\n # self.sim.setAgentPosition(self.robot_num, old_rpos)\n if self.file is not None:\n self.update_visualization()", "def set_goal(self, robot_id, task, pub_msg): \n pub_names = self.goal_pubs.keys()\n pub_objs = self.goal_pubs.values()\n for i in range(len(pub_names)):\n if robot_id == int(pub_names[i]):\n Goal = MoveBaseActionGoal()\n Goal.header.stamp = rospy.Time.now()\n Goal.header.frame_id = ''\n Goal.goal_id.stamp = rospy.Time.now()\n Goal.goal_id.id = str(int(task[0]))\n Goal.goal.target_pose.header.stamp = rospy.Time.now()\n Goal.goal.target_pose.header.frame_id = 'map'\n Goal.goal.target_pose.pose.position.x = task[1]\n Goal.goal.target_pose.pose.position.y = task[2]\n z_rot_rad = task[3] * np.pi / 180\n q = quaternion_from_euler(0, 0, z_rot_rad)\n Goal.goal.target_pose.pose.orientation.z = q[2]\n Goal.goal.target_pose.pose.orientation.w = q[3]\n pub_obj = pub_objs[i]\n pub_obj.publish(Goal)\n print(\"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \".\")\n msg_str = \"Goal set for robot \" + str(robot_id) + \". Task id: \" + str(int(task[0])) + \". 
Time: %s\" % rospy.Time.now().to_sec()\n pub_msg.publish(msg_str)\n break\n else:\n pass", "def _route_to_goal(self, position, orientation):\n _, (_x,_y) = self._calc_torus_distance(position, self.goal)\n move = None\n\n if orientation == 'up':\n if self.goal[1] > position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] < position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'down':\n if self.goal[1] < position[1] and _y > 0:\n move = 'move'\n elif self.goal[1] > position[1] and _y < 1:\n move = 'move'\n elif self.goal[0] > position[0]:\n if _x > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _x > 0:\n move = 'left'\n else:\n move = 'right'\n\n if orientation == 'right':\n if self.goal[0] < position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] > position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n else:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n\n if orientation == 'left':\n if self.goal[0] > position[0] and _x > 0:\n move = 'move'\n elif self.goal[0] < position[0] and _x < 1:\n move = 'move'\n elif self.goal[1] > position[1]:\n if _y > 0:\n move = 'right'\n else:\n move = 'left'\n else:\n if _y > 0:\n move = 'left'\n else:\n move = 'right'\n\n return move", "def main(argv) -> None:\n rospy.init_node('waypoint_node')\n # Register publishers first\n pub_reached = rospy.Publisher(\"~reached\", String,\n queue_size=1) # FIXME decide queue_size\n\n # Register subscribers\n ds = __DroneStates()\n # For middleware\n waypoint_topic_name = \"~waypoint\"\n _ = rospy.Subscriber(waypoint_topic_name, PoseStamped, ds.store_waypoint)\n\n # Register actionlib clients\n takeoff_topic = rospy.resolve_name(\"action/takeoff\")\n takeoff_client = SimpleActionClient(takeoff_topic, TakeoffAction)\n landing_topic = rospy.resolve_name(\"action/landing\")\n landing_client = SimpleActionClient(landing_topic, LandingAction)\n\n pose_topic = rospy.resolve_name(\"action/pose\")\n pose_client = SimpleActionClient(pose_topic, PoseAction)\n\n def action_pose_done_cb(goal_state, goal_result):\n rospy.logdebug(\"Reached\\n %s\" % str(ds.curr_waypoint.pose.position))\n ds.reset_curr_waypoint()\n\n def shutdown() -> None: # TODO Better place for this code\n \"\"\"Stop the drone when this ROS node shuts down\"\"\"\n # TODO Safe landing\n pass\n\n rospy.on_shutdown(shutdown)\n\n # TODO Wait for hector quadrotor controllers to spawn\n rospy.sleep(1)\n\n rate = rospy.Rate(100) # 100Hz TODO Pass sleep rate as a parameter?\n\n is_driving = False\n while not rospy.is_shutdown():\n rate.sleep()\n # Simple controller code for drones # TODO Need better controller\n if not is_driving: # IDLE\n if ds._waypoints.empty(): # FIXME accessing protected member\n pass # Keep idling\n else:\n ds.set_curr_waypoint()\n pose_client.wait_for_server()\n\n pose_goal = PoseGoal(target_pose=ds.target_pose())\n rospy.logdebug(\"Sending pose goal\\n %s\" % str(pose_goal))\n\n pose_client.send_goal(PoseGoal(target_pose=ds.target_pose()),\n done_cb=action_pose_done_cb)\n is_driving = True\n else: # DRIVING\n if ds.reached == ReachedEnum.NO:\n pass # Keep driving\n else:\n if ds.reached == ReachedEnum.YES_AND_REPORT:\n pub_reached.publish(ds.report_reached())\n is_driving = False", "def step(self, action=[], simulation=False, m1=0, m2=0):\n\n # receive m1 and m2 if using it for the 
Uvirobot_model simulation\n if not simulation:\n m1, m2 = self._dediscretize_action(action)\n\n if not self.differential_car: # Ackerman model. Cambiado == por Not.\n # m1 = orientation m2= engine\n\n wm1 = (16.257 * (m1 - 180) / 75) + np.random.uniform(-0.3, 0.3, 1)[0]\n\n # the negative sign is because it turns to the left with PWM 0-127\n # and for us turning to the left is positive w_ang\n wm2 = - self.alpha_ack * (m2 - 128) / 127 + np.random.uniform(-0.3, 0.3, 1)[0]\n\n self.v_linear = wm1*self.r_ack*np.cos(wm2)\n self.w_ang = -(wm1*self.r_ack*np.cos(wm2)*np.tan(wm2))/self.l_ack\n\n else: # differential model\n # PWM to rads conversion\n wm1 = (25 * (m1 - 145) / 110) + np.random.uniform(-1, 1, 1)[0]\n wm2 = (25 * (m2 - 145) / 110) + np.random.uniform(-1, 1, 1)[0]\n\n\n # Calculate linear and angular velocity\n self.v_linear = (wm2 + wm1) * (self.r / 2)\n\n # wm1 - wm2 because m1 is the engine of the right\n # changed old ecuation because it was wrong and divided /3.35 to make it like the wrong ecuation that worked\n\n if not self.discrete_input:\n self.w_ang = (wm1 - wm2) * (self.r / self.rho)\n else:\n self.w_ang = (wm1 - wm2) * (2*self.r / self.rho)\n\n # Calculate position and theta\n self.x = self.x + self.v_linear * math.cos(self.theta) * self.time\n self.y = self.y + self.v_linear * math.sin(self.theta) * self.time\n self.theta = self.theta + self.w_ang * self.time\n\n # to set theta between [0,2pi]\n if self.theta > 2*math.pi:\n self.theta = self.theta-2*math.pi\n elif self.theta < 0:\n self.theta = self.theta+2*math.pi\n\n # return the state if i´m using it for the uvirobot_model simulation\n if simulation:\n return self.x, self.y, self.theta\n\n # add noise to position and theta\n # self.x_noise = self.x + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.y_noise = self.y + np.random.normal(self.mu, self.sigmaxy, 1)\n # self.theta_noise = self.theta + np.random.normal(self.mu,\n # self.sigmaangle, 1)\n\n # Calculate the distance to the closest point in trajectory,\n # depending on distance, delta theta (ugv to trajectory) and distance\n # covered in this step\n self._distance_next()\n self._calc_zone()\n self._calc_delta_theta()\n self._distance_covered()\n # I want to know how far it went to give reward each 50 points\n\n # Calculate done and reward\n # Only want this end for open circuit\n if self.index == (len(self.x_trajectory) - 1) and not self.closed:\n done = 1\n reward = 20\n\n elif (self.x > self.max_x) or (self.x < -self.max_x) or \\\n (self.y < -self.max_y) or (self.y > self.max_y):\n done = 1\n # It had a reward of -10 but doesnt make sense cause the car doesnt\n # know where it is\n reward = 0\n\n elif self.steps >= self.max_steps:\n done = 1\n # Reward of -10 if its open circuit, for closed circuit reward = 0\n # because it wouldnt make sense to punish because it is infinite\n if self.closed:\n reward = 0\n else:\n reward = -50\n\n # elif math.fabs(self.delta_theta) > math.pi/2:\n # done = 1\n # reward = -10\n\n elif self.zone_reward == 3:\n done = 1\n if self.discrete_input:\n reward = -100\n else:\n reward = -10\n\n else:\n done = 0\n # I removed Christians rewards\n reward = -1 * BETA_DIST * math.fabs(self.distance) + \\\n BETA_GAP * self.gap\n\n if (self.index//50) > self.farthest:\n self.farthest = self.index//50\n reward += 5\n#\n # Number of iterations in a episode\n self.steps += 1\n\n if self.discrete_input:\n # discretize state for the agent to control\n\n discrete_distance, discrete_delta_theta \\\n = self._discretize_agent_state(self.distance, 
self.delta_theta)\n\n self.agent_state = np.array([discrete_distance,\n discrete_delta_theta])\n else:\n # self.agent_state has to be a matrix to be accepted by keras\n self.agent_state = np.array([self.distance, self.delta_theta])\n\n # self.norm_distance=(self.distance+0.071)/(0.071*2)\n # self.norm_delta_theta=(self.delta_theta+np.pi)/(2*np.pi)\n\n # Create state (x,y,theta)\n self.state = [self.x, self.y, self.theta]\n # print(self.state,self.sign)\n\n return self.state, self.agent_state, reward, done", "async def after_tick(self):\n\n msg = Message(\n to=self.agent.factory_jid,\n body=self.agent.position.to_json()\n )\n msg.set_metadata(\"performative\", \"inform\")\n await self.send(msg)", "def set_new_pose(circuit_positions_set):\n position = random.choice(list(enumerate(circuit_positions_set)))[0]\n print(position)\n # pos_number = int(circuit_positions_set[0])\n\n state = ModelState()\n state.model_name = \"f1_renault\"\n state.pose.position.x = circuit_positions_set[position][1]\n state.pose.position.y = circuit_positions_set[position][2]\n state.pose.position.z = circuit_positions_set[position][3]\n state.pose.orientation.x = circuit_positions_set[position][4]\n state.pose.orientation.y = circuit_positions_set[position][5]\n state.pose.orientation.z = circuit_positions_set[position][6]\n state.pose.orientation.w = circuit_positions_set[position][7]\n\n rospy.wait_for_service(\"/gazebo/set_model_state\")\n try:\n set_state = rospy.ServiceProxy(\"/gazebo/set_model_state\", SetModelState)\n set_state(state)\n except rospy.ServiceException as e:\n print(\"Service call failed: {}\".format(e))\n return position", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def publish_messages(self, V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2):\n\n ##################################################################################\n\n # Create a posestamped message containing position information\n\n # Create pose message\n msg = PoseStamped()\n\n # Header details for pose message\n msg.header.frame_id = \"map\"\n msg.header.stamp = rospy.Time.now()\n\n # Pose information\n msg.pose.position.x = V_translation[0]\n msg.pose.position.y = V_translation[1]\n msg.pose.position.z = V_translation[2]\n msg.pose.orientation.x = V_rotation[0]\n msg.pose.orientation.y = V_rotation[1]\n msg.pose.orientation.z = V_rotation[2]\n msg.pose.orientation.w = V_rotation[3]\n\n\n ##################################################################################\n\n # Create an multi array message containing pose information\n\n # Create array message\n array_msg = Float32MultiArray()\n array_msg.layout.dim.append(MultiArrayDimension())\n array_msg.layout.dim[0].label = \"vehicle_position\"\n array_msg.layout.dim[0].size = 3\n array_msg.layout.dim[0].stride = 3\n\n # Append data\n array_msg.data.append(V_translation[0])\n array_msg.data.append(V_translation[1])\n array_msg.data.append(V_translation[2])\n\n ##################################################################################\n\n # Create point cloud and publish to rviz\n\n # Create a point cloud from the xyz values in the array list\n header = Header()\n header.stamp = rospy.Time.now()\n header.frame_id = 'map'\n point_cloud = 
pcl2.create_cloud_xyz32(header, terrain_grid_points)\n\n ##################################################################################\n\n # Create a marker to vizualize the footprint of the vehicle\n viz_points = Marker()\n viz_points.header.frame_id = \"map\"\n viz_points.header.stamp = rospy.Time.now()\n viz_points.ns = \"grid_marker\"\n viz_points.id = 1\n viz_points.action = viz_points.ADD\n viz_points.type = viz_points.CUBE_LIST\n\n viz_points.scale.x = 0.01\n viz_points.scale.y = 0.01\n viz_points.scale.z = 0.01\n\n viz_points.color.a = 1.0\n viz_points.color.r = 1.0\n viz_points.color.g = 0.0\n viz_points.color.b = 0.0\n viz_points.points = V_viz_points\n\n\n ################################################################\n\n # Create pose message for joints 1 & 2\n msg1 = PoseStamped()\n msg2 = PoseStamped()\n\n # Header details for pose message\n msg1.header.frame_id = \"vehicle_frame\"\n msg1.header.stamp = rospy.Time.now()\n\n msg2.header.frame_id = \"vehicle_frame\"\n msg2.header.stamp = rospy.Time.now()\n\n # Pose information\n joint_1 = tf_conversions.toMsg(frame_J1)\n joint_2 = tf_conversions.toMsg(frame_J2)\n \n msg1.pose = joint_1\n msg2.pose = joint_2\n\n # Publish pose, vizualization, array information and point cloud\n self.pose_publisher.publish(msg)\n self.joint1_pose_publisher.publish(msg1)\n self.joint2_pose_publisher.publish(msg2)\n self.pose_array_publisher.publish(array_msg)\n self.point_cloud_publisher.publish(point_cloud)\n self.grid_publisher.publish(viz_points)", "def step_simulation(self, action):\n # target = np.zeros(6)\n # a = np.copy(action)\n # for i in range(6):\n # target[i] = a[i] + ref_pos[i + 3]\n\n target = action * 1.5\n # target = action + ref_pos[3:9]\n\n joint_angle_4, joint_velocity_4 = self.get_joint_angle_and_velocity(4)\n joint_angle_7, joint_velocity_7 = self.get_joint_angle_and_velocity(7)\n self.joint_history.append(np.asarray([joint_angle_4, joint_velocity_4, joint_angle_7, joint_velocity_7]))\n\n joint_angles = self.robot_skeleton.q[3:]\n joint_velocities = self.robot_skeleton.dq[3:]\n\n tau = np.zeros(self.robot_skeleton.ndofs) # torque to apply at each simulation clock\n tau[3:] = self.P * (target - joint_angles) - self.D * joint_velocities\n tau = np.clip(tau, -150 * self.volume_scaling, 150 * self.volume_scaling)\n self.tau_history.append(tau)\n # print(tau)\n self.do_simulation(tau, 1)", "def sendAgentMovement(self, degree, distance):\r\n print \"SEND & WAIT: AgentMovement\"\r\n waitForFullExec(self, self.sender.sendAgentMovement(degree, distance))", "def main():\n arg_fmt = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(formatter_class=arg_fmt,\n description=main.__doc__)\n parser.add_argument(\n \"-q\", \"--joint_angles\", type=float,\n nargs='+', default=[0.158984375, 0.665759765625, -1.53172265625, 1.0492724609375, 0.8098212890625, -1.0504248046875, 2.89727734375],\n help=\"A list of joint angles, one for each of the 7 joints, J0...J6\")\n parser.add_argument(\n \"-s\", \"--speed_ratio\", type=float, default=0.1,\n help=\"A value between 0.001 (slow) and 1.0 (maximum joint velocity)\")\n parser.add_argument(\n \"-a\", \"--accel_ratio\", type=float, default=0.1,\n help=\"A value between 0.001 (slow) and 1.0 (maximum joint accel)\")\n parser.add_argument(\n \"--timeout\", type=float, default=None,\n help=\"Max time in seconds to complete motion goal before returning. 
None is interpreted as an infinite timeout.\")\n args = parser.parse_args(rospy.myargv()[1:])\n\n try:\n rospy.init_node('go_to_joint_angles_py')\n limb = Limb()\n traj = MotionTrajectory(limb = limb)\n\n wpt_opts = MotionWaypointOptions(max_joint_speed_ratio=args.speed_ratio,\n max_joint_accel=args.accel_ratio)\n waypoint = MotionWaypoint(options = wpt_opts.to_msg(), limb = limb)\n\n joint_angles = limb.joint_ordered_angles()\n\n waypoint.set_joint_angles(joint_angles = joint_angles)\n traj.append_waypoint(waypoint.to_msg())\n\n if len(args.joint_angles) != len(joint_angles):\n rospy.logerr('The number of joint_angles must be %d', len(joint_angles))\n return None\n\n waypoint.set_joint_angles(joint_angles = args.joint_angles)\n traj.append_waypoint(waypoint.to_msg())\n\n # slight right (20 degree)\n # waypoint.set_joint_angles([-0.155232421875, 0.4621865234375, -0.3448271484375, 0.4330361328125, 0.017708984375, -0.946375, 3])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # neutral position\n # waypoint.set_joint_angles([-0.155232421875, 0.4621865234375, -0.3448271484375, 0.4330361328125, 0.017708984375, -0.946375, 2.040958984375])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # slight left (20 degree)\n # waypoint.set_joint_angles([0.178685546875, -0.2291533203125, -0.7179814453125, 1.633763671875, 2.1484375e-05, -1.3499716796875, 2.0902988281250003])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # neutral position\n # waypoint.set_joint_angles([0.178685546875, -0.2291533203125, -0.7179814453125, 1.633763671875, 2.1484375e-05, -1.3499716796875, 2.439298828125])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # slight front (20 degree)\n # waypoint.set_joint_angles([0.1974599609375, -0.27071484375, -0.7559970703125, 1.5779091796875, -0.14858203125, -1.1271669921875, 2.5262158203125])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # neutral position\n # waypoint.set_joint_angles([0.178685546875, -0.2291533203125, -0.7179814453125, 1.633763671875, 2.1484375e-05, -1.3499716796875, 2.439298828125])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # slight back (20 degree)\n # waypoint.set_joint_angles([0.1611396484375, -0.174541015625, -0.6814091796875, 1.6546083984375, 0.1373291015625, -1.5653515625, 2.39933984375])\n # traj.append_waypoint(waypoint.to_msg())\n #\n # # neutral position\n # waypoint.set_joint_angles([0.178685546875, -0.2291533203125, -0.7179814453125, 1.633763671875, 2.1484375e-05, -1.3499716796875, 2.439298828125])\n # traj.append_waypoint(waypoint.to_msg())\n\n\n result = traj.send_trajectory(timeout=args.timeout)\n if result is None:\n rospy.logerr('Trajectory FAILED to send')\n return\n\n if result.result:\n rospy.loginfo('Motion controller successfully finished the trajectory!')\n else:\n rospy.logerr('Motion controller failed to complete the trajectory with error %s',\n result.errorId)\n except rospy.ROSInterruptException:\n rospy.logerr('Keyboard interrupt detected from the user. 
Exiting before trajectory completion.')", "def pose_callback(self, pose):\n while (not self.socket_available): # wait for socket to become available\n pass\n reply = None\n self.socket_available = False # Block other processes from using the socket while in use\n if self.robot.is_in_error():\n self.robot.ResetError()\n self.robot.ResumeMotion()\n if pose.position.z is not None:\n reply = self.robot.MovePose(pose.position.x, pose.position.y, pose.position.z, pose.orientation.x,\n pose.orientation.y, pose.orientation.z)\n else:\n reply = self.robot.MovePose(pose.position.x, pose.position.y, pose.orientation.x, pose.orientation.y)\n self.socket_available = True # Release socket so other processes can continue\n if reply is not None:\n self.reply_publisher.publish(reply)", "def send_pose(self, pPosn, pOrnt, baseFrame, trgtFrame, pTime):\n # Create message\n xform = TransformStamped()\n # Transform Header\n xform.header.stamp = pTime\n xform.header.frame_id = baseFrame\n xform.child_frame_id = trgtFrame\n # Position\n xform.transform.translation.x = pPosn[0]\n xform.transform.translation.y = pPosn[1]\n xform.transform.translation.z = pPosn[2]\n # Orientation\n xform.transform.rotation.x = pOrnt[0]\n xform.transform.rotation.y = pOrnt[1]\n xform.transform.rotation.z = pOrnt[2]\n xform.transform.rotation.w = pOrnt[3]\n # Send message\n self.moveOut.sendTransform(xform)", "def velocity_callback(self, msg_velocity):\n if self.last_pose.header.stamp.to_sec() > 0: # skip first frame\n\n dt = (msg_velocity.header.stamp - self.last_pose.header.stamp).to_sec()\n\n # Integrate the relative movement between the last pose and the current\n theta_delta = self.last_theta_dot * dt\n # to ensure no division by zero for radius calculation:\n if np.abs(self.last_theta_dot) < 0.000001:\n # straight line\n x_delta = self.last_v * dt\n y_delta = 0\n else:\n # arc of circle\n radius = self.last_v / self.last_theta_dot\n x_delta = radius * np.sin(theta_delta)\n y_delta = radius * (1.0 - np.cos(theta_delta))\n\n # Add to the previous to get absolute pose relative to the starting position\n theta_res = self.last_pose.theta + theta_delta\n x_res = self.last_pose.x + x_delta * np.cos(self.last_pose.theta) - y_delta * np.sin(self.last_pose.theta)\n y_res = self.last_pose.y + y_delta * np.cos(self.last_pose.theta) + x_delta * np.sin(self.last_pose.theta)\n\n # Update the stored last pose\n self.last_pose.theta = theta_res\n self.last_pose.x = x_res\n self.last_pose.y = y_res\n\n # Stuff the new pose into a message and publish\n msg_pose = Pose2DStamped()\n msg_pose.header = msg_velocity.header\n msg_pose.header.frame_id = self.veh_name\n msg_pose.theta = theta_res\n msg_pose.x = x_res\n msg_pose.y = y_res\n self.pub_pose.publish(msg_pose)\n\n self.last_pose.header.stamp = msg_velocity.header.stamp\n self.last_theta_dot = msg_velocity.omega\n self.last_v = msg_velocity.v", "def __init__(self, init_pose=[0.,0.,0.,0.,0.,0.], init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pose = None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n\n # Goal0\n self.target_pos = target_pose[:3] if target_pose is not None else np.array([0,0,10])\n self.target_vel = target_pose[3:] if target_pose is not None else np.array([0,0,0])\n self.best_pose = init_pose\n self.best_reward = -np.inf\n \n self.vel_w = 0\n self.pos_w = 1", "def 
_target_callback(self, msg):\n self.target_pose = np.asarray(msg.pos)[np.newaxis].T\n self.target_vel = np.asarray(msg.vel)[np.newaxis].T\n self.target_acc = np.asarray(msg.acc)[np.newaxis].T\n\n print(\"\\nGoing to:\")\n print(\"Pos: \\n\" + str(self.target_pose))\n print(\"Vel: \\n\" + str(self.target_vel))\n print(\"Acc: \\n\" + str(self.target_acc))", "def move_up(self):\n\n # slowly drive backwards\n self.velocity = const.Driving.MAX_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.front > gap: continue\n\n self.stop_driving()", "def pose_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': pose received')\n self.current_pose = msg.pose", "def main(): #Main Control Loop (as prototyped on 2/26 in Glennan Lounge)\n\t# Create listener to receive info from UI\n\tui_listener = pso_network.UIListener()\n\tui_listener.daemon = True\n\tui_listener.start()\n\tui_state = ui_listener.get_ui()\n\t\n\t# Create listener to recieve waypoints and corrections from planner.\n\tplanner_listener = pso_network.PlannerListener()\n\tplanner_listener.daemon = True\n\tplanner_listener.start()\n\twaypoint = cv.CreateMat(4, 1, cv.CV_32FC1)\n\t\n\t#Instatiate Drone Objects (defined in Drone.py)\n\tmyDrone = Drone(\"192.168.1.1\")\n\t\n\t\n\t#Preset flags\n\trunning = True\n\twait_on_emergency = False\n\twait_on_liftoff = False\n\twait_on_land = False\n\t\n\t#Create Kalman filter, state, and command vectors\n\tkalman = PsoKalman()\n\tu = cv.CreateMat(4, 1, cv.CV_32FC1)\n\tz = cv.CreateMat(5, 1, cv.CV_32FC1)\n\tsys_time = time.time()\n\t\n\t#Create PID controllers for each axis\n\tyaw_pid = pso_pid.PID()\n\tyaw_pid.k = 1.5\n\tyaw_pid.t_i = 1.\n\tyaw_pid.angular = True\n\tyaw_pid.deadband = .05\n\t\n\tz_pid = pso_pid.PID()\n\tz_pid.k = .00075\n\tz_pid.i_enable = False\n\tz_pid.t_i = 10.\n\tz_pid.deadband = 150\n\t\n\troll_pid = pso_pid.PID()\n\troll_pid.k = .00025\n\troll_pid.i_enable = False\n\troll_pid.deadband = 50\n\t\n\tpitch_pid = pso_pid.PID()\n\tpitch_pid.k = .00025\n\tpitch_pid.i_enable = False\n\tpitch_pid.deadband = 50\n\t\n\t#Logger puts state in csv for matlab-y goodness\n\tlogger = debuglogger.Logger()\n\t\n\t#Fig bucking loop\n\twhile(running):\n\t\ttime.sleep(.05)\n\t\tos.system(\"clear\")\n\t\t\n\t\t#Get command state from UI\n\t\tprev_ui_state = ui_state\n\t\tui_state = ui_listener.get_ui()\n\t\t\t\t\n\t\tif ui_state[EMERGENCY]:\n\t\t\tmyDrone.emergency()\n\t\t\n\t\tif ui_state[SHUTDOWN]:\n\t\t\t#UI has ordered shutdown\n\t\t\tprint \"Shutting down control loop...\"\n\t\t\tui_listener.stop()\n\t\t\tmyDrone.kill()\n\t\t\trunning = False\n\t\t\n\t\tif ui_state[TRIM]:\n\t\t\tmyDrone.trim()\n\t\t\tui_listener.clear_flag(TRIM)\n\t\t\tprint \"\\nTRIM\\n\"\n\t\t\n\t\tif ui_state[FLYING]:\n\t\t\tmyDrone.takeoff()\n\t\t\tprint \"Taking Off/Flying\"\n\t\t\tif not prev_ui_state[FLYING]:\n\t\t\t\twait_on_liftoff = 5\n\t\telse:\n\t\t\tmyDrone.land()\n\t\t\tprint \"Landing/Landed\"\n\t\t\tif prev_ui_state[FLYING]:\n\t\t\t\twait_on_land = 5\n\t\t\n\t\tif ui_state[RESET]:\n\t\t\tmyDrone.reset_emergency()\n\t\t\tmyDrone.reset()\n\t\t\tyaw_pid.flush()\n\t\t\tz_pid.flush()\n\t\t\troll_pid.flush()\n\t\t\tpitch_pid.flush()\n\t\t\tui_listener.clear_flag(RESET)\n\t\t\n\t\t#Get navdata\n\t\tnav = myDrone.get_navdata()\n\t\t\n\t\t#Print out Drone State\n\t\tif nav.check_state(navdata.EMERGENCY):\n\t\t\tprint \"Emergency!\"\n\t\telif not 
nav.check_state(navdata.COM_WATCHDOG):\n\t\t\tprint \"WATCHDOG\"\n\t\telif nav.check_state(navdata.COMMAND):\n\t\t\tprint \"Watchdog cleared. Not yet ready for commands.\"\n\t\telse:\n\t\t\tprint \"Ready to Fly\\n\"\n\t\tprint \"\\t\\tECACAVNAPCUWAPTHLGCMBNTTTCUACVVF\\n{0}\\t\\t{1:32b}\".format(nav.seq,nav.state) #Print navdata state\n\t\t\n\t\t#Update State (Kalman)\n\t\tdt = time.time()-sys_time\n\t\tprint \"dt:\\t\",dt\n\t\tsys_time = time.time()\n\t\tz[0, 0], z[1, 0], z[2, 0], z[3, 0], z[4, 0] = nav.vx, nav.vy, nav.z, nav.vz, nav.psi\n\t\t#z and u need to be cv matrices!!!!\n\t\tsys_state = myDrone.get_state()\n\t\tprint \"\\nDrone Kalman State:\"\n\t\tprint \"x:\\t{0}\".format(sys_state[0, 0])\n\t\tprint \"y:\\t{0}\".format(sys_state[2, 0])\n\t\tprint \"z:\\t{0}\".format(sys_state[4, 0])\n\t\tprint \"vx:\\t{0}\".format(sys_state[1, 0])\n\t\tprint \"vy:\\t{0}\".format(sys_state[3, 0])\n\t\tprint \"vz:\\t{0}\".format(sys_state[5, 0])\n\t\tprint \"theta:\\t{0}\".format(sys_state[6, 0])\n\t\tprint \"vtheta:\\t{0}\".format(sys_state[7, 0])\n\t\t\n\t\tprint \"\\nNavdata Euler Angles:\"\n\t\tprint \"theta:\\t\",nav.theta\n\t\tprint \"phi:\\t\",nav.phi\n\t\tprint \"psi:\\t\",nav.psi\n\t\tprint \"\\nNavdata Stuff:\"\n\t\tprint \"z:\\t\",nav.z\n\t\tprint \"vx:\\t\",nav.vx\n\t\tprint \"vy:\\t\",nav.vy\n\t\tui_listener.set_state(sys_state, nav)\n\t\t#logger.log(sys_state)\n\t\t\n\t\tif wait_on_liftoff>0:\n\t\t\tprint \"Waiting for liftoff to finish\"\n\t\t\twait_on_liftoff -= dt\n\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = 0, 0, 1, 0#Assume drone goes full speed up when taking off\n\t\telif ui_state[FLYING]:\n\t\t\tprint \"\" #Blank line to everything lines up\n\t\t\t#If Drone is in waypoint mode, compute command\n\t\t\tif not ui_state[OVERRIDE]:\n\t\t\t\t#Get waypoint\n\t\t\t\tif not planner_listener.waypoints.empty():\n\t\t\t\t\twaypoint = planner_listener.waypoints.get()\n\t\t\t\tprint \"\\nNext Waypoint:\"\n\t\t\t\tprint \"X:\\t\", waypoint[0, 0]\n\t\t\t\tprint \"Y:\\t\", waypoint[1, 0]\n\t\t\t\tprint \"Z:\\t\", waypoint[2, 0]\n\t\t\t\tprint \"θ:\\t\", waypoint[3, 0]\n\t\t\t\t#Compute command\n\t\t\t\t(roll_des, pitch_des) = world2drone(waypoint[0, 0]-sys_state[0, 0], waypoint[1, 0]-sys_state[2, 0], sys_state[6, 0])\n\t\t\t\tprint \"Desired Roll:\\t\", roll_des\n\t\t\t\tprint \"Desired Pitch:\\t\", pitch_des\n\t\t\t\tu[0, 0] = pitch_pid.update(0, pitch_des)\n\t\t\t\tu[1, 0] = roll_pid.update(0, roll_des)\n\t\t\t\tu[2, 0] = z_pid.update(sys_state[4, 0], waypoint[2, 0])\n\t\t\t\tu[3, 0] = yaw_pid.update(sys_state[6, 0], waypoint[3, 0])\n\t\t\t\tmyDrone.go(u[0, 0], u[1, 0], u[3, 0], u[2, 0])\n\t\t\telse: #Manual override: Use command from UI state\n\t\t\t\tprint \"\\nManual override mode\\n\\n\\n\"\n\t\t\t\tmyDrone.go(ui_state[COMMAND][0], ui_state[COMMAND][1], ui_state[COMMAND][2], ui_state[COMMAND][3])\n\t\t\t\tu[0, 0], u[1, 0], u[2, 0], u[3, 0] = ui_state[COMMAND]\n\t\telse:\n\t\t\tprint \"\\nLanded\"\n\t\t\n\t\t#Print out commands\n\t\tprint \"\\nCommands:\\npitch:\\t\",u[0, 0]\n\t\tprint \"roll:\\t\", u[1, 0]\n\t\tprint \"gaz:\\t\", u[2, 0]\n\t\tprint \"yaw:\\t\", u[3, 0]", "def send_joint_trajectory(trajectory, velocities, frequency=250):\n pub = rospy.Publisher(\"/wam/jnt_pos_cmd\", RTJointPos, queue_size=10)\n #If wam_node is running, it will be connected to this publisher.\n #Mostly this loop is here because you want to make sure the publisher\n #gets set up before it starts sending information.\n while pub.get_num_connections() < 1:\n print \"Waiting on the publisher to go up.\"\n 
rospy.sleep(0.5)\n\n trajectory_length = trajectory.__len__()\n finished = False\n traj_row = 0\n message_for_service = RTJointPos()\n\n r = rospy.Rate(frequency)\n\n while not rospy.is_shutdown() and not finished:\n message_for_service.joints = trajectory[traj_row]\n message_for_service.rate_limits = velocities[traj_row]\n traj_row += 1\n pub.publish(message_for_service)\n if traj_row == trajectory_length - 1:\n finished = True\n r.sleep()", "def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()", "def move_base(self, x, y, z):\n # fill ROS message\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n traj = trajectory_msgs.msg.JointTrajectory()\n traj.joint_names = [\"odom_x\", \"odom_y\", \"odom_t\"]\n p = trajectory_msgs.msg.JointTrajectoryPoint()\n p.positions = [x, y, z]\n p.velocities = [0, 0, 0]\n p.time_from_start = rospy.Time(15)\n traj.points = [p]\n goal.trajectory = traj\n\n # send message to the action server\n self.cli.send_goal(goal)\n\n # wait for the action server to complete the order\n self.cli.wait_for_result()\n return self._running", "def move(self, direction, proportional=False):\n servo = pygame.mixer.Sound(file='resources/Servo_Motor.wav')\n servo.set_volume(0.2)\n # validity of direction\n if not proportional:\n assert direction != 0 and -1 <= direction <= 1, \"No valid movement\"\n # p: eigene position, d * (vorne/hinten): positionsänderung\n self.pos = [p + (d * direction)\n for p, d in zip(self.pos, DIRECTIONS[self.rotation])]\n if self.speakers:\n self.speakers.play(servo)\n new_turn = \"{0}\".format(self.pos)\n self._call_gamelog_callbacks(new_turn)", "def vertices_to_baxter_hand_pose(grasp_vertices, approach_direction):\n # YOUR CODE HERE\n raise NotImplementedError" ]
[ "0.6900186", "0.6846976", "0.6781664", "0.6722921", "0.6500318", "0.647756", "0.641095", "0.63947874", "0.6390289", "0.6380875", "0.6371278", "0.6319203", "0.61569244", "0.6111631", "0.60957015", "0.6094904", "0.6075576", "0.6063163", "0.6058459", "0.60555637", "0.6015337", "0.6003554", "0.5989652", "0.59474933", "0.5933698", "0.5925335", "0.5907569", "0.5890557", "0.58349895", "0.5831664", "0.5794407", "0.5777507", "0.5776272", "0.5774226", "0.5774115", "0.5769881", "0.576655", "0.576655", "0.5765865", "0.5763539", "0.5761033", "0.5758466", "0.5739344", "0.57353014", "0.57347", "0.57283515", "0.5724492", "0.57205397", "0.5713202", "0.57120574", "0.5703053", "0.5700078", "0.56917405", "0.5666651", "0.5662733", "0.5653644", "0.5643683", "0.5636705", "0.56306964", "0.56285954", "0.5627392", "0.5624214", "0.5622798", "0.56183696", "0.5608336", "0.55934334", "0.5583729", "0.55598986", "0.55522865", "0.5545315", "0.5541432", "0.55377114", "0.5522565", "0.55163133", "0.5514287", "0.55035657", "0.54929584", "0.54836583", "0.5481147", "0.54685336", "0.5468315", "0.545491", "0.54525065", "0.5440588", "0.54342437", "0.5418957", "0.5393868", "0.539303", "0.53801155", "0.53718126", "0.53707165", "0.53594023", "0.5347312", "0.53442055", "0.5341578", "0.53325933", "0.53287613", "0.53187686", "0.53173625", "0.531374" ]
0.74411386
0
Sends a command to the ROS service to update ocean currents based on vehicle position
def update_current(self):
        velocity, horizontal_angle, vertical_angle = self.current_function()
        self.set_current_velocity(velocity, horizontal_angle, vertical_angle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self):\n self._odom_msg.header.stamp = rospy.Time.now()\n # query base state from robot and store in odom msg\n position, orientation, linear_velocity, angular_velocity = self._robot.get_base_state()\n [self._odom_msg.pose.pose.position.x,\n self._odom_msg.pose.pose.position.y,\n self._odom_msg.pose.pose.position.z] = position\n [self._odom_msg.pose.pose.orientation.x,\n self._odom_msg.pose.pose.orientation.y,\n self._odom_msg.pose.pose.orientation.z,\n self._odom_msg.pose.pose.orientation.w] = orientation\n [self._odom_msg.twist.twist.linear.x,\n self._odom_msg.twist.twist.linear.y,\n self._odom_msg.twist.twist.linear.z] = linear_velocity\n [self._odom_msg.twist.twist.angular.x,\n self._odom_msg.twist.twist.angular.y,\n self._odom_msg.twist.twist.angular.z] = angular_velocity\n self._publisher.publish(self._odom_msg)\n\n tf_msg = TransformStamped()\n tf_msg.header.frame_id = self._odom_msg.header.frame_id\n tf_msg.child_frame_id = self._odom_msg.child_frame_id\n tf_msg.transform.translation = self._odom_msg.pose.pose.position\n tf_msg.transform.rotation = self._odom_msg.pose.pose.orientation\n tf_msg.header.stamp = rospy.Time.now()\n self._br.sendTransform(tf_msg)", "def send_ned_velocity(uvwLoc):\n vel = 5. * 2. # m/s at average cdf=.5\n m = norm(7.8, 15.22).cdf(np.sum(uvwLoc**2.))\n uvwLocNorm = -(uvwLoc / np.abs(uvwLoc).max())\n uvwVel = m * vel * uvwLocNorm\n velocity_x, velocity_y, velocity_z = uvwVel[1], uvwVel[0], uvwVel[2]\n rospy.logdebug(\"{} m={} mXv={} uvw={} norm={} vel={}\".format(tag, m, m*vel, uvwLoc, uvwLocNorm, velocity_x, velocity_y, velocity_z))\n if vehicle.mode.name != \"GUIDED\":\n rospy.logerr(\"{} send_ned_ve not in Guided mode {}\".format(tag, vehicle.mode.name))\n return\n\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n 0b0000111111000111, # type_mask (only speeds enabled)\n 0, 0, 0, # x, y, z positions (not used)\n velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n # send command to vehicle on 1 Hz cycle\n vehicle.send_mavlink(msg)\n # for _ in range(2):\n # vehicle.send_mavlink(msg)\n # rospy.sleep(rospy.Duration.from_sec(.15))", "def main():\n\n\t# Initialising ROS node\n\trospy.init_node(\"turtlebot_move\")\n\n\t# Reading parameters from the launch file\n\tnpy_path = rospy.get_param(\"/publish_velocity/npy_file_path\")\n\n\t# Reading the generated A* path from the .npy file\n\t# rospack = rospkg.RosPack()\n\t# npy_path = os.path.join(rospack.get_path('turtlebot_astar'), 'src/path_dumps/path_final.npy')\n\trobot_path_list = np.load(npy_path, allow_pickle=True)\n\n\tglobal goal\n\tgoal.x, goal.y = robot_path_list[0].getXYCoords()\n\n\t# Creating the Publisher and the Subscriber\n\tpub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n\tsub = rospy.Subscriber(\"/odom\", Odometry, newOdom, (robot_path_list, pub))\n\n\tr = rospy.Rate(4)\n\tspeed = Twist()\n\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\n\t\t\tinc_x = goal.x - x\n\t\t\tinc_y = goal.y - y\n\n\t\t\tangle_to_goal = math.atan2(inc_y, inc_x)\n\n\t\t\tif abs(angle_to_goal - theta) < 0.1:\n\t\t\t\tspeed.linear.x = 0.5\n\t\t\t\tspeed.angular.z = 0.0\n\t\t\telif (angle_to_goal - theta) < 0:\n\t\t\t\tspeed.linear.x = 0.0\n\t\t\t\tspeed.angular.z = -0.3\n\t\t\telse:\n\t\t\t\tspeed.linear.x = 
0.0\n\t\t\t\tspeed.angular.z = 0.3\n\n\t\t\t# Publishing the Velocity Inputs for the TurtleBot on the topic /cmd_vel\n\t\t\tpub.publish(speed)\n\t\t\tr.sleep()\n\n\texcept rospy.exceptions.ROSInterruptException as ros_int:\n\t\tprint(ros_int)\n\texcept Exception as e:\n\t\traise e", "def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)", "def move(self):\n # moving each of the obstacles\n time = rospy.get_rostime()\n self.vel_msg1.linear.x = math.sin(time.to_sec())\n self.pub1.publish(self.vel_msg1)\n self.vel_msg2.linear.x = math.cos(1.2 * time.to_sec())\n self.pub2.publish(self.vel_msg2)\n self.vel_msg3.linear.x = math.cos(2 * time.to_sec())\n self.pub3.publish(self.vel_msg3)\n self.vel_msg4.linear.x = math.sin(0.85 * time.to_sec())\n self.pub4.publish(self.vel_msg4)", "def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. 
Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 
0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)", "def status_update(position, velocity, angle, angle_vel, thrust, time, lin_targets=None):\n print(f\"time: {time}\")\n\n pos_title = \"position:\".ljust(25)\n vel_title = \"velocity:\".rjust(25)\n print(pos_title + vel_title)\n for i in range(3):\n print(f\"{position[i, 0]}\".ljust(25) + f\"{velocity[i, 0]}\".rjust(25))\n print()\n angle_title = \"angle:\".ljust(25)\n angle_vel_title = \"angular velocity:\".rjust(25)\n print(angle_title + angle_vel_title)\n for i in range(3):\n print(f\"{angle[i, 0]}\".ljust(25) + f\"{angle_vel[i, 0]}\".rjust(25))\n\n print(\"\\nthrust:\")\n thrust_string = \"\"\n for i in range(4):\n thrust_string += f\"{thrust[i]}\\t\"\n print(thrust_string)\n print()\n\n if lin_targets is not None:\n print(f\"x_target: {lin_targets[-3]}\")\n 
print(f\"y_target: {lin_targets[-2]}\")\n print(f\"z_target: {lin_targets[-1]}\")\n print()", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def update(self) -> None:\n dev_id = slugify(self.vehicle.name)\n\n if not self.vehicle.state.is_vehicle_tracking_enabled:\n _LOGGER.debug(\"Tracking is disabled for vehicle %s\", dev_id)\n return\n\n _LOGGER.debug(\"Updating %s\", dev_id)\n attrs = {\"vin\": self.vehicle.vin}\n self._see(\n dev_id=dev_id,\n host_name=self.vehicle.name,\n gps=self.vehicle.state.gps_position,\n attributes=attrs,\n icon=\"mdi:car\",\n )", "def update_vehicle_state(self):\n sim_timestep = 1. / self.simulation_rate\n # Decompose v into x and y component.\n if self.v != self.commands['speed']:\n self.v = self.commands['speed']\n vx = numpy.cos(self.yaw) * self.v\n vy = numpy.sin(self.yaw) * self.v\n # Update vehicles position\n self.x += vx * sim_timestep\n self.y += vy * sim_timestep\n self.yaw += ((self.v / self.axles_distance) *\n numpy.tan(self.commands['steering_angle']) *\n sim_timestep)\n # Make sure self.yaw is never negative.\n # self.yaw 0..2pi\n if self.yaw > 2*numpy.pi:\n self.yaw = 0.\n elif self.yaw < 0.:\n self.yaw += 2*numpy.pi", "def execute(self):\n control_params = {\"bodyUniqueId\": self._robot.id, \"jointIndices\": self._robot.joint_indices}\n if self._pc_subscriber.get_is_data_available():\n control_params[\"controlMode\"] = self._pb.POSITION_CONTROL\n control_params[\"targetPositions\"] = self._pc_subscriber.get_last_cmd()\n control_params[\"forces\"] = self._force_commands\n if self._vc_subscriber.get_is_data_available():\n control_params[\"controlMode\"] = self._pb.VELOCITY_CONTROL\n control_params[\"targetVelocities\"] = self._vc_subscriber.get_last_cmd()\n control_params[\"forces\"] = self._force_commands\n if self._ec_subscriber.get_is_data_available():\n control_params[\"controlMode\"] = self._pb.TORQUE_CONTROL\n control_params[\"forces\"] = self._ec_subscriber.get_last_cmd()\n\n if \"controlMode\" in control_params.keys():\n self._pb.setJointMotorControlArray(**control_params)", "def drive(self, carstate: State) -> Command:\n command = Command()\n stateList = self.stateToArray(carstate)\n # output = self.network.activate(stateList)\n\n # Set the link to find the file of the one to work with\n if not self.set:\n files = glob.glob(\"cooperation*.txt\")\n\n for fileN in files:\n if fileN != \"cooperation\" + str(self.id) + \".txt\":\n self.co_car = fileN\n break;\n self.set = True\n\n fh = open(\"cooperation\" + str(self.id) + \".txt\",\"w\")\n fh.write(str(self.id) + \": \" + str(carstate.distance_raced))\n fh.close()\n\n fh = open(self.co_car,\"r\")\n lines = fh.read()\n distance_other = float(lines.split(\": \")[-1])\n fh.close()\n\n if distance_other > carstate.distance_raced:\n opponents = carstate.opponents\n # Now get information from these opponents to ride against them,\n # Feed that information in the network and drive\n # by changing the stateList\n output = self.network.forward(stateList).data\n\n #print(output)\n #print(carstate.speed_x)\n #print(carstate.distance_from_start)\n #print(carstate.opponents)\n #self.steer(carstate, output[0, 2], command)\n command.steering = output[0,0]\n #self.accelerate(carstate, 80, command)\n if carstate.speed_x < 0.1:\n \tcommand.accelerator = abs(output[0,1])\n else:\n \tcommand.accelerator = output[0,1]\n\n if output[0,1] < 0.0:\n \tcommand.brake = output[0,2]\n else:\n \tcommand.brake = 0\n\n if command.accelerator > 0:\n \tif carstate.rpm > 
8000:\n \t\tcommand.gear = carstate.gear + 1\n if command.accelerator < 0:\n \tif carstate.rpm < 2500:\n \t\tcommand.gear = carstate.gear - 1\n if not command.gear:\n \tcommand.gear = carstate.gear or 1\n\n #acceleration = output[0,1]*129\n #acceleration = math.pow(acceleration, 3)\n\n\n #if acceleration > 0:\n # if abs(carstate.distance_from_center) >= 1:\n # off track, reduced grip:\n # acceleration = min(0.4, acceleration)\n\n # command.accelerator = min(acceleration, 1)\n\n # if carstate.rpm > 8000:\n # command.gear = carstate.gear + 1\n\n #else:\n # command.brake = min(-acceleration, 1)\n\n #if carstate.rpm < 2500:\n # command.gear = carstate.gear - 1\n\n #if not command.gear:\n # command.gear = carstate.gear or 1\n\n\n #if output[0,0]>0.5:\n # ACC_LATERAL_MAX = 6400 * 5\n # v_x = min(80, math.sqrt(ACC_LATERAL_MAX / abs(command.steering)))\n #else:\n # v_x = 0\n #self.accelerate(carstate, 85, command)\n\n\n\n if self.data_logger:\n self.data_logger.log(carstate, command)\n\n return command", "def compute_controller(self):\n \n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n if messages:\n for m in messages:\n dx += m[1][0] - pos[0]\n dy += m[1][1] - pos[1]\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n \n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])", "def controller_routine(self, event=None):\n # pop trajectory if available\n with self.lock:\n vel_commands_in_rob = self.vel_commands_in_rob\n self.vel_commands_in_rob = None\n if vel_commands_in_rob is None:\n return\n # execute trajectory unless a new one is requested\n for i in range(len(vel_commands_in_rob)):\n # check if trajectory is stale\n if (i * DT) >= CONTROLLER_ZOMBIE_TIMEOUT:\n cmd_vel_msg = Twist()\n rospy.logwarn(\"Reached controller zombie timeout\")\n cmd_vel_msg.linear.x = 0\n cmd_vel_msg.linear.y = 0\n cmd_vel_msg.angular.z = 0\n self.cmd_vel_pub.publish(cmd_vel_msg)\n return\n # send current trajectory\n with self.lock:\n STOP = self.STOP\n if not STOP:\n vel_x = vel_commands_in_rob[i, 0] * VEL_MULT\n vel_y = vel_commands_in_rob[i, 1] * VEL_MULT\n vel_norm = np.sqrt(vel_x**2 + vel_y**2)\n if vel_norm > self.MAX_VEL:\n vel_x = vel_x / vel_norm * self.MAX_VEL\n vel_y = vel_y / vel_norm * self.MAX_VEL\n # publish cmd_vel\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = vel_x\n cmd_vel_msg.linear.y = vel_y\n cmd_vel_msg.angular.z = vel_commands_in_rob[i, 2] * VEL_MULT\n self.cmd_vel_pub.publish(cmd_vel_msg)\n # sleep for DT\n rospy.sleep(DT)\n # check if a newer trajectory is available\n with self.lock:\n if self.vel_commands_in_rob is not None:\n return", "def send_initial_data():\n since = datetime.utcnow() - timedelta(minutes=10)\n entries = 
models.VehicleLocationLog.get_latest_entries(since)\n\n for entry in entries:\n socketio.emit('update_location',\n {'vehicle_id': entry.vehicle.vehicle_uuid,\n 'lat': entry.lat,\n 'lng': entry.lng},\n namespace='/vehicles',\n room=request.sid)", "def Run(self):\n # Waits until the action server has started up and started\n # listening for goals.\n self._MoveBaseClient.wait_for_server()\n rospy.loginfo(\"move_base is UP!\")\n\n goal = move_base_msgs.msg.MoveBaseGoal()\n # print(\"Empty goal:\")\n # print(goal)\n # Note that move_base will not go to an all zero target.\n\n # Grab a static copy of the current pose to work with\n # Otherwise it might change under our feet!\n \"\"\"\n Note, the actual pose on the map is not the same as this,\n but there is not map based pose.\n What there is the odometry based pose, and then a transform\n from the odometry to the map.\n Retriving the transform, combining it with the odom pose\n and making use of it is a future exercise.\n \"\"\"\n current_odom = self.currentOdom\n # print(\"Current odom:\")\n # print(current_odom)\n print(\"current_odom.pose:\")\n print(current_odom.pose)\n # rospy.Subscriber(\"cmd_vel\", Twist, self._HandleVelocityCommand)\n\n rosNow = rospy.Time.now()\n # we'll create a goal to send to move_base\n goal.target_pose.header.frame_id = \"base_link\"\n goal.target_pose.header.stamp = rosNow\n\n # This will move forward 1 meter from 0\n # goal.target_pose.pose.position.x = 0.0\n # goal.target_pose.pose.orientation.w = 1.0\n\n # Set new pose to same as current pose\n \"\"\"\n You have to set .position and .orientation,\n not .pose because the current_odom.pose\n includes covariance, the other cannot take\n \"\"\"\n goal.target_pose.pose.position = current_odom.pose.pose.position\n goal.target_pose.pose.orientation = current_odom.pose.pose.orientation\n \"\"\"\n If the odometry, which is tied to /base_link, was identical\n to the map location, this would tell it to go nowhere,\n but what we actually end up doing here is telling move_base\n to move the robot the difference between the odom (/base_link)\n and the map. :)\n \"\"\"\n \"\"\"\n a quick and easy way to get the transform from the /map to /base_link is to use the command-line tool:\n rosrun tf tf_echo /map /base_link\n So how do I combine this myself?\n \"\"\"\n\n # Rotate currentPose by 90 degrees\n quaternion_difference = tf2_ros.transformations.quaternion_about_axis(\n 0.123, (1, 0, 0)\n )\n # print(\"quaternion_difference:\")\n # print(quaternion_difference)\n\n print(\"Populated goal:\")\n print(goal.target_pose.pose)\n\n rospy.loginfo(\"Sending goal\")\n # Sends the goal to the action server.\n self._MoveBaseClient.send_goal(goal)\n\n rospy.loginfo(\"Waiting for response . . 
.\")\n # Waits for the server to finish performing the action.\n self._MoveBaseClient.wait_for_result()\n # This could wait a VERY long time,\n # if the move_base doesn't have a timeout it will never come back,\n # in most cases it does, but it seems in some cases it will retry forever.\n # http://docs.ros.org/api/actionlib/html/classactionlib_1_1simple__action__client_1_1SimpleActionClient.html#a460c9f52fd650f918cb287765f169445\n\n result = self._MoveBaseClient.get_result()\n # rospy.loginfo(result)\n result = self._MoveBaseClient.get_state()\n # rospy.loginfo(result)\n\n current_odom = self.currentOdom\n print(\"New odom:\")\n print(current_odom.pose)\n\n rospy.loginfo(\"Ok, now what?\")", "def odom_update(self, data):\n self.curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def update(self):\n return_code, pos_new = sim.simxGetObjectPosition(self.clientID, self.from_handle, self.to_handle, sim.simx_opmode_streaming)\n self.distance = np.linalg.norm(np.asarray(pos_new))", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def send_update(self, neighbor):\n message = 'ROUTE UPDATE'\n source = ':'.join([self.name[0], str(self.name[1])])\n dv = []\n for others in self.distance_vector:\n others_sep = others.split(':')\n dv.append(','.join([others_sep[0], others_sep[1], str(self.distance_vector[others].cost)]))\n dv = '\\n'.join(dv)\n to_send = '\\n'.join([message, source, dv])\n neighbor.sok.sendto(to_send, (neighbor.addr, neighbor.port))\n neighbor.send_timer = time.time()\n neighbor.update_ready = False", "def turn_on(self):\n # read out the current pose of the robot\n configuration = self.robot.get_all_servo_position()\n\n # interpolate to the default position\n interpolation_time = 3000 # ms\n interpolation_steps = interpolation_time // TIME_FRAME\n\n speed = np.zeros(18)\n for i in range(18):\n speed[i] = (SERVOS_BASE[i] - configuration[i]) / interpolation_steps\n\n # execute the motion\n for t in range(interpolation_steps):\n self.robot.set_all_servo_position(configuration + t * speed)", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def on_can_bus_update(self, msg):\n self._logger.debug('@{}: {} received message'.format(\n msg.timestamp, self._name))\n vehicle_transform = 
msg.data.transform\n # Draw position. We add 0.5 to z to ensure that the point is above the\n # road surface.\n self._world.debug.draw_point(carla.Location(\n x=vehicle_transform.location.x,\n y=vehicle_transform.location.y,\n z=vehicle_transform.location.z + 0.5),\n size=0.2,\n life_time=DEFAULT_VIS_TIME,\n color=carla.Color(255, 0, 0))", "def Run(self):\n while not rospy.is_shutdown():\n # get input of destination\n if (self.target_x is None) or (self.target_y is None):\n self.target_x, self.target_y = self.GetTarget()\n # if the destination is not in x >= 0 and y <= 11, ask user to re-input\n # till the right values found\n if (self.target_x < 0 or self.target_x > 10) or (self.target_y < -10 or self.target_y > 10):\n print(\"-------------------------------------------------------\") \n print(\"WARNING: Invalid Input, please reinput the destination.\")\n print(\"-------------------------------------------------------\") \n self.target_x = None\n self.target_y = None\n else:\n print(\"Current Destination: [{}, {}]\".format(self.target_x, self.target_y))\n else:\n ################################################################################################\n # get all necessary parameters\n goal = np.array([self.target_x, self.target_y])\n robot_pos = self.GetRobotInfo()\n ranges, angles = self.GetObstaclePos()\n\n if (ranges is not None) and (angles is not None):\n ctrl = TangentBug(self.Lidar.range_max)\n # obsts = ctrl.Continuity(ranges, angles, robot_pos[:2])\n # print(len(obsts))\n linear, omega = ctrl.MotionToGo(ranges, angles, goal, robot_pos)\n print(\"=======================================\")\n # print([linear, omega])\n else:\n linear = 0.\n omega = 0.\n print(\"---------------------------------------\")\n print(\"NO OBSTACLE DETECTED.\")\n print(\"---------------------------------------\")\n\n ################################################################################################\n self.vel.linear.x = linear \n self.vel.angular.z = omega\n self.pub.publish(self.vel)\n\n # sleep till the next commend sent\n self.rate.sleep()", "def cmd_callback(data, car):\n vel = int(data.velocity)\n steer = int(data.steer)\n vel = np.min([np.max([vel, -100]), 100])\n steer = np.min([np.max([steer, -100]), 100])\n\n if car.is_connected():\n # rospy.loginfo('CarAct: [' + rospy.get_caller_id() +\n # ']: v=%f, s=%f', vel, steer)\n car.command_car(vel, steer)\n else:\n print(\"Something is wrong with serial connection\")", "def updateposition(self, event):\n\n self.log('Updating position', lvl=verbose)\n self.current_position = event.vessel.geojson['coordinates']", "def servo(self, position: int):\n position = int(position) # bytes only takes ints\n\n command = bytearray([])\n command += b\"e\"\n command += bytes([position])\n command += b\"\\n\"\n\n # newline indicates end of command to arduino\n self.send(command)", "def update(self, CS, CP, VM, PP, live20, live100, md, live_map_data):\n cur_time = live20.logMonoTime / 1e9\n v_ego = CS.carState.vEgo\n gasbuttonstatus = CS.carState.gasbuttonstatus\n\n long_control_state = live100.live100.longControlState\n v_cruise_kph = live100.live100.vCruise\n force_slow_decel = live100.live100.forceDecel\n v_cruise_setpoint = v_cruise_kph * CV.KPH_TO_MS\n\n\n for socket, event in self.poller.poll(0):\n if socket is self.lat_Control:\n self.lastlat_Control = messaging.recv_one(socket).latControl\n\n self.lead_1 = live20.live20.leadOne\n self.lead_2 = live20.live20.leadTwo\n\n\n lead_1 = live20.live20.leadOne\n lead_2 = live20.live20.leadTwo\n\n\n 
enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0\n\n v_speedlimit = NO_CURVATURE_SPEED\n v_curvature = NO_CURVATURE_SPEED\n map_valid = live_map_data.liveMapData.mapValid\n\n # Speed limit and curvature\n set_speed_limit_active = self.params.get(\"LimitSetSpeed\") == \"1\" and self.params.get(\"SpeedLimitOffset\") is not None\n if set_speed_limit_active:\n if live_map_data.liveMapData.speedLimitValid:\n speed_limit = live_map_data.liveMapData.speedLimit\n offset = float(self.params.get(\"SpeedLimitOffset\"))\n v_speedlimit = speed_limit + offset\n\n if live_map_data.liveMapData.curvatureValid:\n curvature = abs(live_map_data.liveMapData.curvature)\n a_y_max = 2.975 - v_ego * 0.0375 # ~1.85 @ 75mph, ~2.6 @ 25mph\n v_curvature = math.sqrt(a_y_max / max(1e-4, curvature)) / 1.3 * _brake_factor\n v_curvature = min(NO_CURVATURE_SPEED, v_curvature)\n\n decel_for_turn = bool(v_curvature < min([v_cruise_setpoint, v_speedlimit, v_ego + 1.]))\n v_cruise_setpoint = min([v_cruise_setpoint, v_curvature, v_speedlimit])\n\n # Calculate speed for normal cruise control\n if enabled:\n accel_limits = map(float, calc_cruise_accel_limits(v_ego, following, gasbuttonstatus))\n if gasbuttonstatus == 0:\n accellimitmaxdynamic = -0.0018*v_ego+0.2\n jerk_limits = [min(-0.1, accel_limits[0] * 0.5), max(accellimitmaxdynamic, accel_limits[1])] # dynamic\n elif gasbuttonstatus == 1:\n accellimitmaxsport = -0.002*v_ego+0.4\n jerk_limits = [min(-0.25, accel_limits[0]), max(accellimitmaxsport, accel_limits[1])] # sport\n elif gasbuttonstatus == 2:\n accellimitmaxeco = -0.0015*v_ego+0.1\n jerk_limits = [min(-0.1, accel_limits[0] * 0.5), max(accellimitmaxeco, accel_limits[1])] # eco\n \n if not CS.carState.leftBlinker and not CS.carState.rightBlinker:\n steering_angle = CS.carState.steeringAngle\n if self.lastlat_Control and v_ego > 11: \n angle_later = self.lastlat_Control.anglelater\n else:\n angle_later = 0\n else:\n angle_later = 0\n steering_angle = 0\n accel_limits = limit_accel_in_turns(v_ego, steering_angle, accel_limits, self.CP, angle_later * self.CP.steerRatio)\n\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)\n accel_limits[0] = min(accel_limits[0], accel_limits[1])\n\n # Change accel limits based on time remaining to turn\n if decel_for_turn:\n time_to_turn = max(1.0, live_map_data.liveMapData.distToTurn / max(self.v_cruise, 1.))\n required_decel = min(0, (v_curvature - self.v_cruise) / time_to_turn)\n accel_limits[0] = max(accel_limits[0], required_decel)\n\n self.v_cruise, self.a_cruise = speed_smoother(self.v_acc_start, self.a_acc_start,\n v_cruise_setpoint,\n accel_limits[1], accel_limits[0],\n jerk_limits[1], jerk_limits[0],\n _DT_MPC)\n # cruise speed can't be negative even is user is distracted\n self.v_cruise = max(self.v_cruise, 0.)\n else:\n starting = long_control_state == LongCtrlState.starting\n a_ego = min(CS.carState.aEgo, 0.0)\n reset_speed = MIN_CAN_SPEED if starting else v_ego\n reset_accel = self.CP.startAccel if starting else a_ego\n self.v_acc = reset_speed\n self.a_acc = reset_accel\n self.v_acc_start = reset_speed\n self.a_acc_start = reset_accel\n self.v_cruise = reset_speed\n self.a_cruise = reset_accel\n\n self.mpc1.set_cur_state(self.v_acc_start, self.a_acc_start)\n self.mpc2.set_cur_state(self.v_acc_start, self.a_acc_start)\n\n self.mpc1.update(CS, 
lead_1, v_cruise_setpoint)\n self.mpc2.update(CS, lead_2, v_cruise_setpoint)\n\n self.choose_solution(v_cruise_setpoint, enabled)\n\n # determine fcw\n if self.mpc1.new_lead:\n self.fcw_checker.reset_lead(cur_time)\n\n blinkers = CS.carState.leftBlinker or CS.carState.rightBlinker\n fcw = self.fcw_checker.update(self.mpc1.mpc_solution, cur_time, v_ego, CS.carState.aEgo,\n lead_1.dRel, lead_1.vLead, lead_1.aLeadK,\n lead_1.yRel, lead_1.vLat,\n lead_1.fcw, blinkers) and not CS.carState.brakePressed\n if fcw:\n cloudlog.info(\"FCW triggered %s\", self.fcw_checker.counters)\n\n model_dead = cur_time - (md.logMonoTime / 1e9) > 0.5\n\n # **** send the plan ****\n plan_send = messaging.new_message()\n plan_send.init('plan')\n\n # TODO: Move all these events to controlsd. This has nothing to do with planning\n events = []\n if model_dead:\n events.append(create_event('modelCommIssue', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n\n radar_errors = list(live20.live20.radarErrors)\n if 'commIssue' in radar_errors:\n events.append(create_event('radarCommIssue', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n if 'fault' in radar_errors:\n events.append(create_event('radarFault', [ET.NO_ENTRY, ET.SOFT_DISABLE]))\n\n plan_send.plan.events = events\n plan_send.plan.mdMonoTime = md.logMonoTime\n plan_send.plan.l20MonoTime = live20.logMonoTime\n\n\n # longitudal plan\n plan_send.plan.vCruise = self.v_cruise\n plan_send.plan.aCruise = self.a_cruise\n plan_send.plan.vStart = self.v_acc_start\n plan_send.plan.aStart = self.a_acc_start\n plan_send.plan.vTarget = self.v_acc\n plan_send.plan.aTarget = self.a_acc\n plan_send.plan.vTargetFuture = self.v_acc_future\n plan_send.plan.hasLead = self.mpc1.prev_lead_status\n plan_send.plan.hasrightLaneDepart = bool(PP.r_poly[3] > -1.1 and not CS.carState.rightBlinker)\n plan_send.plan.hasleftLaneDepart = bool(PP.l_poly[3] < 1.05 and not CS.carState.leftBlinker)\n plan_send.plan.longitudinalPlanSource = self.longitudinalPlanSource\n\n plan_send.plan.vCurvature = v_curvature\n plan_send.plan.decelForTurn = decel_for_turn\n plan_send.plan.mapValid = map_valid\n\n # Send out fcw\n fcw = fcw and (self.fcw_enabled or long_control_state != LongCtrlState.off)\n plan_send.plan.fcw = fcw\n\n self.plan.send(plan_send.to_bytes())\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n dt = 0.05 # s\n a_acc_sol = self.a_acc_start + (dt / _DT_MPC) * (self.a_acc - self.a_acc_start)\n v_acc_sol = self.v_acc_start + dt * (a_acc_sol + self.a_acc_start) / 2.0\n self.v_acc_start = v_acc_sol\n self.a_acc_start = a_acc_sol", "def commandUpdate(self):\n pass", "def setPath(self, request, context):\n \n cmds = self.vehicle.commands\n coordFrame, alt = None, None\n waypoints = []\n \n # The idea behind stripping off the first position is to determine what reference frame to\n # to use. Future proto changes will removed the coordinate frame boolean flag from the \n # request making the code unnecessary. For now, this is the way it is.\n firstPosition = nth(request, 0)\n lat = firstPosition.lat\n lon = firstPosition.lon\n \n useRelativeAltitude = firstPosition.useRelativeAltitude\n \n if useRelativeAltitude:\n alt = firstPosition.relativeAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT\n else:\n alt = firstPosition.gpsAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL\n\n print ('First position at ({0},{1}) -> {2}'.format(lat, lon, alt))\n waypoints.append([lat, lon, alt])\n nextIndex = self.vehicle.commands.next\n # Make sure the drone is not in AUTO mode. 
\n #self.vehicle.mode = VehicleMode(\"LOITER\")\n self.clear_mission(cmds, coordFrame)\n \n # Add first position\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n \n # Add the remaining positions\n for position in request:\n lat = position.lat\n lon = position.lon\n if useRelativeAltitude:\n alt = position.relativeAltitude\n else:\n alt = position.gpsAltitude\n print ('Point at ({0},{1}) -> {2}'.format(lat, lon, alt))\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n waypoints.append([lat, lon, alt])\n \n print (\"Uploading new commands to drone\")\n cmds.upload()\n \n # Reset mission set to first (0) waypoint\n #if self.vehicle.commands.next !=0:\n # print \"Continuing mission...\"\n #else:\n # print \"Starting mission\"\n # self.vehicle.commands.next = 0\n if len(self.vehicle.waypoints)==0:\n print \"Starting mission\"\n self.vehicle.commands.next = 0\n else:\n print \"Continuing mission...\"\n self.vehicle.commands.next = nextIndex\n \n self.vehicle.waypoints = waypoints \n self.vehicle.mode = VehicleMode(\"AUTO\")\n \n self.print_mission() \n \n return droneconnect_pb2.Null()", "def _ros_location_callback(self, msg: NavSatFix):\n self._telegram_updater.bot.send_location(self._telegram_chat_id, location=Location(msg.longitude, msg.latitude))", "def odom_update(self, data):\n self, curr_pos = (data.pose.pose.position.x, data.pose.pose.position.y)", "def update(self):\n self.arest.update()", "def _setOceanLocation(self):\r\n\t\t## If the fluids_hrc exists\r\n\t\tif cmds.objExists('fluids_hrc'):\r\n\t\t\tif cmds.objExists('ocean_srf'):\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateX', 'ocean_srf.translateX', f = True)\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateZ', 'ocean_srf.translateZ', f = True)\r\n\t\t\telse:\r\n\t\t\t\tcmds.warning('MISSING ocean_srf node from scene....')\r\n\r\n\t\t\tif cmds.objExists('oceanPreviewPlane_prv'):\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateX', 'oceanPreviewPlane_prv.translateX', f = True)\r\n\t\t\t\tcmds.connectAttr('fluids_hrc.translateZ', 'oceanPreviewPlane_prv.translateZ', f = True)\r\n\t\t\telse:\r\n\t\t\t\tcmds.warning('MISSING oceanPreviewPlane_prv node from scene....')\r\n\t\telse:\r\n\t\t\tcmds.warning('NO fluids_hrc FOUND! Can not move the ocean into final position. 
PLEASE CHECK FX PUBLISH NOW!')", "def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)", "def updateOmLocation(self):\n if self.om != None:\n self.om.current_loc = self.destinations[self.current_loc]", "def loop(self):\n rospy.loginfo(\"Hexapod entering main loop...\")\n rospy.loginfo(\" Waiting for initial velocity command on /hexapod/cmd_vel/ ...\")\n while self.last_vel_cmd is None:\n self._loop_rate.sleep()\n\n # start main loop\n while not rospy.is_shutdown():\n\n chassis_pos_delta = None\n if self.last_vel_cmd is not None:\n dt = 1 # FIXME: Temporary for debugging\n lin_disp_lmt = self.linear_displacement_limit\n ang_disp_lmt = self.angular_displacement_limit\n chassis_pos_delta = Twist()\n chassis_pos_delta.linear.x = clamp(self.last_vel_cmd.linear.x*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.linear.y = clamp(self.last_vel_cmd.linear.y*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.linear.z = clamp(self.last_vel_cmd.linear.z*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.angular.x = clamp(self.last_vel_cmd.angular.x*dt, -ang_disp_lmt, ang_disp_lmt)\n chassis_pos_delta.angular.y = clamp(self.last_vel_cmd.angular.y*dt, -ang_disp_lmt, ang_disp_lmt)\n chassis_pos_delta.angular.z = clamp(self.last_vel_cmd.angular.z*dt, -ang_disp_lmt, ang_disp_lmt)\n self.last_vel_cmd = None\n\n if chassis_pos_delta is not None \\\n and not self._check_if_twist_msg_is_zero(chassis_pos_delta, linear_threshold=0.005, angular_threshold=0.01):\n # Get chassis position transformation\n chassis_pos_rot = transforms.euler_matrix(chassis_pos_delta.angular.x,\n chassis_pos_delta.angular.y,\n chassis_pos_delta.angular.z)[:3,:3]\n\n rospy.loginfo(\"chassis_pos_rot: %s\", chassis_pos_rot)\n chassis_pos_trans = np.zeros([3])\n chassis_pos_trans[0] = chassis_pos_delta.linear.x\n chassis_pos_trans[1] = chassis_pos_delta.linear.y\n chassis_pos_trans[2] = chassis_pos_delta.linear.z\n chassis_translation = np.dot(chassis_pos_trans, chassis_pos_rot)\n rospy.loginfo(\"chassis_translation: %s\", chassis_translation)\n\n leg_target_eff_translation = [[]]*LEGS\n # Get leg base positions relative to chassis\n leg_base_positions = self._get_base_to_leg_base_fk()\n for i, leg_base_pos in enumerate(leg_base_positions):\n leg_base_pos_arr = np.array(leg_base_pos).reshape(3,1)\n leg_base_pos_arr_new = np.dot(chassis_pos_rot, leg_base_pos_arr)\n leg_base_pos_trans_4 = np.ones(4).reshape(4,1)\n leg_base_pos_trans_4[:3,:] = leg_base_pos_arr_new\n # get leg base translations relative to leg_base coordinate frame\n relative_trans = np.dot(np.linalg.inv(self.kdl_fk_base_to_leg_base[i].forward([])), leg_base_pos_trans_4)\n relative_trans = relative_trans.reshape(1,4).tolist()[0][:3]\n leg_target_eff_translation[i] = relative_trans\n\n # Get leg target end-effector translations\n for i, q in enumerate(self.leg_jt_home_pos):\n base_to_leg_base_rot = self.kdl_fk_base_to_leg_base[i].forward([])[:3,:3]\n leg_target_eff_trans = 
np.dot(np.linalg.inv(base_to_leg_base_rot),chassis_translation).tolist()[0]\n leg_target_eff_translation[i] = [x+y for x,y in zip(leg_target_eff_translation[i], leg_target_eff_trans)] # TODO: FIXME: Technically incorrect\n\n # 1: side_alpha legs lift, plant to +transformation\n rospy.loginfo(\"1: side_alpha legs lift, plant to +transformation\")\n if self._odd_starts:\n active_legs = [1,2,5]\n else: # even starts\n active_legs = [0,3,4]\n\n init_wp = WaypointMsg()\n lift_wp = WaypointMsg()\n end_wp = WaypointMsg()\n\n legs_jt_init_pos = self._get_joint_angles()\n leg_eff_cur_pos = self._get_leg_base_to_eff_fk(legs_jt_init_pos)\n for i in range(LEGS):\n motor_names = [name for name in self.hebi_mapping[i]]\n # INITIAL POSITION\n init_wp.names.extend(motor_names)\n init_wp.positions.extend(legs_jt_init_pos[i])\n init_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n init_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # LIFT\n lift_wp.names.extend(motor_names)\n if i in active_legs:\n # apply translation\n leg_lift_eff_target_pos = [(x + y + z) / 2.0 for x, y, z in zip(leg_eff_cur_pos[i], self.leg_eff_home_pos[i], leg_target_eff_translation[i])]\n leg_lift_eff_target_pos = [x + y for x,y in zip(leg_lift_eff_target_pos, self.leg_eff_step_height[i])]\n # get ik\n leg_lift_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], legs_jt_init_pos[i],\n leg_lift_eff_target_pos, seed_xyz=self.leg_eff_home_pos[i])\n lift_wp.positions.extend(leg_lift_jt_target_pos)\n lift_wp.velocities.extend([NAN]*ACTUATORS_PER_LEG)\n lift_wp.accelerations.extend([NAN]*ACTUATORS_PER_LEG)\n else:\n lift_wp.positions.extend(legs_jt_init_pos[i])\n lift_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n lift_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # PLANT\n end_wp.names.extend(motor_names)\n if i in active_legs:\n # apply translation\n leg_plant_eff_target_pos = [x + y for x,y in zip(self.leg_eff_home_pos[i], leg_target_eff_translation[i])]\n leg_plant_eff_target_pos[2] = self.leg_eff_home_pos[i][2] # end eff z-position should match home z-position\n # get ik\n leg_plant_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], leg_lift_jt_target_pos,\n leg_plant_eff_target_pos, seed_xyz=leg_lift_eff_target_pos)\n end_wp.positions.extend(leg_plant_jt_target_pos)\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n else:\n end_wp.positions.extend(legs_jt_init_pos[i])\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n\n goal = TrajectoryGoal()\n goal.waypoints.append(init_wp)\n goal.waypoints.append(lift_wp)\n goal.waypoints.append(end_wp)\n goal.times.extend([0.0, 0.4, 0.8])\n\n self.release_pos([1,2,3,4,5,6])\n self.trajectory_action_client.send_goal(goal)\n self.trajectory_action_client.wait_for_result()\n self.hold_pos([1,2,3,4,5,6])\n\n # 2: side_alpha legs push to new home positions; side_beta legs push to -transformation\n rospy.loginfo(\"2: side_alpha legs push to new home positions; side_beta legs push to -transformation\")\n if self._odd_starts:\n active_legs = [0,3,4]\n else: # even starts\n active_legs = [1,2,5]\n\n init_wp = WaypointMsg()\n end_wp = WaypointMsg()\n\n legs_jt_init_pos = self._get_joint_angles()\n for i in range(LEGS):\n motor_names = [name for name in self.hebi_mapping[i]]\n # INITIAL POSITION\n init_wp.names.extend(motor_names)\n init_wp.positions.extend(legs_jt_init_pos[i])\n init_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n 
init_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # PUSH\n end_wp.names.extend(motor_names)\n if i in active_legs:\n # apply -translation\n leg_plant_eff_target_pos = [x + y for x,y in zip(self.leg_eff_home_pos[i], [-val for val in leg_target_eff_translation[i]])]\n leg_plant_eff_target_pos[2] = self.leg_eff_home_pos[i][2] # end eff z-position should match home z-position\n # get ik\n leg_plant_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], legs_jt_init_pos[i],\n leg_plant_eff_target_pos, seed_xyz=self.leg_eff_home_pos[i])\n end_wp.positions.extend(leg_plant_jt_target_pos)\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n else:\n end_wp.positions.extend(self.leg_jt_home_pos[i])\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n\n goal = TrajectoryGoal()\n goal.waypoints.append(init_wp)\n goal.waypoints.append(end_wp)\n goal.times.extend([0.0, 0.4])\n\n self.release_pos([1,2,3,4,5,6])\n self.trajectory_action_client.send_goal(goal)\n self.trajectory_action_client.wait_for_result()\n self.hold_pos([1,2,3,4,5,6])\n\n self._odd_starts = not self._odd_starts\n\n self._loop_rate.sleep() # FIXME: Doesn't make sense to use this unless re-planning trajectories\n # end main loop", "def update_carsPosition(self ):\n self.positions = list()\n for car in self.fleet:\n self.positions.append(self.fleet[car].position)", "def update_visualization(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n return_str = \"\"\n return_str += str(self.sim.getGlobalTime()) + \" \"\n return_str += \"(\" + str(rpos[0]) + \",\" + str(rpos[1]) + \") \"\n return_str += \"(\" + str(rvel[0]) + \",\" + str(rvel[1]) + \") \"\n return_str += str(rrad) + \" \"\n return_str += str(self.headings[self.robot_num]) + \" \"\n return_str += (\"(\" + str(self.overall_robot_goal[0])\n + \",\" + str(self.overall_robot_goal[1]) + \") \")\n return_str += str(v_pref) + \" \"\n return_str += str(theta) + \" \"\n if not self.single:\n for agent in self.agents:\n if agent != self.robot_num: # We already wrote out the robot\n pos = self.sim.getAgentPosition(agent)\n vel = self.sim.getAgentVelocity(agent)\n rad = self.sim.getAgentRadius(agent)\n return_str += \"(\" + str(pos[0]) + \",\" + str(pos[1]) + \") \"\n return_str += \"(\" + str(vel[0]) + \",\" + str(vel[1]) + \") \"\n return_str += str(rad) + \" \"\n return_str += str(self.headings[agent]) + \" \"\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n return_str += \"(\" + str(p1.x) + \",\" + str(p1.y) + \") \"\n return_str += \"(0,0) \"\n return_str += str(self.obs_width) + \" \"\n return_str += str(self.headings[self.robot_num]) + \" \"\n else:\n # Point obstacle\n return_str += \\\n \"(\" + str(obs[0][0]) + \",\" +str(obs[0][1]) + \") \"\n return_str += \"(0,0) \"\n return_str += str(self.obs_width) + \" \"\n return_str += str(self.headings[self.robot_num]) + \" \"\n return_str += \"\\n\"\n if self.file is not None:\n self.file.write(return_str)\n return return_str", "async def update(self, robot):\r\n if self.first:\r\n robot.was_turning = False\r\n robot.was_driving = False\r\n\r\n rotation_rad = math.radians(robot.rotation)\r\n rotation_cos = 
math.cos(rotation_rad)\r\n rotation_sin = math.sin(rotation_rad)\r\n if robot.was_driving:\r\n speed_delta = robot.delta_time * robot.ROBOT_SPEED\r\n\r\n robot.add_odom_position(robot, (rotation_cos * speed_delta, rotation_sin * speed_delta))\r\n robot.grid.setStart(robot.grid_position)\r\n else:\r\n robot.drive_timer = robot.DRIVE_COOLDOWN\r\n if robot.was_turning:\r\n robot.add_odom_rotation(robot, robot.TURN_YAW * robot.delta_time)\r\n\r\n changed = False\r\n if robot.ball is not None:\r\n if robot.prev_ball is not None:\r\n robot.ball_grid = robot.grid.worldToGridCoords(robot.ball)\r\n robot.ball_prev_grid = robot.grid.worldToGridCoords(robot.prev_ball)\r\n changed = robot.ball_grid != robot.ball_prev_grid\r\n else:\r\n changed = True\r\n \r\n if not changed and robot.prev_grid_position != robot.grid_position:\r\n changed = True\r\n\r\n if self.first:\r\n changed = True\r\n self.first = False\r\n\r\n rounded_grid = (round(robot.grid_position[0]), round(robot.grid_position[1]))\r\n if changed:\r\n robot.grid.clearObstacles()\r\n if robot.ball is not None:\r\n grid_points = getGridPoints(robot.ball_grid[0], robot.ball_grid[1], robot)\r\n for point in grid_points:\r\n if robot.grid.coordInBounds(point):\r\n robot.grid.addObstacle(point)\r\n\r\n # Wall obstacles.\r\n for i in range(0, robot.grid.width):\r\n robot.grid.addObstacle((i, 0))\r\n robot.grid.addObstacle((i, robot.grid.height - 1))\r\n for i in range(1, robot.grid.height - 1):\r\n robot.grid.addObstacle((0, i))\r\n robot.grid.addObstacle((robot.grid.width - 1, i))\r\n\r\n goal_to_ball = np.subtract(robot.ball, robot.goal_position)\r\n goal_distance = np.linalg.norm(goal_to_ball)\r\n if goal_distance == 0:\r\n return\r\n goal_direction = np.divide(goal_to_ball, goal_distance)\r\n goal_direction = np.multiply(goal_direction, (robot.RADIUS + robot.BALL_RADIUS) * 1.2)\r\n robot.target_position = np.add(robot.ball, goal_direction)\r\n robot.target_position = robot.grid.worldToGridCoords(robot.target_position)\r\n\r\n if robot.target_position is not None:\r\n robot.grid.clearGoals()\r\n robot.grid.setStart(rounded_grid)\r\n rounded_target = (round(robot.target_position[0]), round(robot.target_position[1]))\r\n robot.grid.addGoal(rounded_target)\r\n astar(robot.grid, heuristic)\r\n\r\n path = robot.grid.getPath()\r\n robot.was_turning = False\r\n if path is not None and len(path) > 1:\r\n robot.next_cell = path[0]\r\n if path[0] == rounded_grid:\r\n robot.next_cell = path[1]\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, rounded_grid, robot.next_cell)\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n robot.stop_all_motors()\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n robot.was_driving = False\r\n else:\r\n await robot.drive_wheels(robot.ROBOT_SPEED, robot.ROBOT_SPEED, robot.ROBOT_ACCELERATION, robot.ROBOT_ACCELERATION)\r\n robot.was_driving = True\r\n else:\r\n robot.was_driving = False\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.target_position)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n\r\n robot.stop_all_motors()\r\n distance = grid_distance(robot.grid_position[0], robot.grid_position[1], robot.target_position[0], 
robot.target_position[1]) * robot.grid.scale\r\n await robot.drive_straight(distance_mm(distance), speed_mmps(robot.HIT_SPEED), should_play_anim = False).wait_for_completed()\r\n robot.add_odom_forward(robot, distance)\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.ball_grid)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n return goto_ball.HitBall()", "def exec_velocity_cmd(self, cmd):\n joint_names = self.joint_names()\n\n velocity_command = dict(zip(joint_names, cmd))\n\n self.set_joint_velocities(velocity_command)", "def _control(self):\n j = 0\n while not rospy.is_shutdown():\n if self.running:\n # Track the path for each vehicle.\n for i, vehicle_id in enumerate(self.vehicle_ids):\n omega = self._get_omega(vehicle_id)\n self.pub_omega.publish(vehicle_id, omega)\n\n # At the start of operation: start each vehicle with a constant\n # speed one after another, delayed with a set amount of samples.\n if j < self.headstart_samples * len(self.vehicle_ids):\n index = j / self.headstart_samples\n vehicle_id = self.vehicle_ids[index]\n\n self.pub_speed.publish(vehicle_id, self.v)\n\n # Normal operation: only control the speeds of the follower\n # vehicles. The velocities are obtained from the PIDs.\n else:\n for i, vehicle_id in enumerate(self.vehicle_ids[1:]):\n vel = self._get_vel(vehicle_id)\n self.pub_speed.publish(vehicle_id, vel)\n\n time.sleep(1. / self.rate)\n\n j += 1", "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def compute_controller(self):\n # region Input Info\n\n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n # print(messages)\n # endregion\n if messages:\n # similar to laplacian but for each robot\n # for m in messages:\n # dx += m[1][0] - pos[0]\n # dy += m[1][1] - pos[1]\n\n # position of All robots\n Apos = np.zeros([6,2])\n Apos[self.id,:]=pos[0:2]\n for m in messages:\n Apos[m[0],:]=m[1][0:2]\n\n TarM = np.zeros([6,2])\n TarM[self.id, :] = self.TargetP[self.Tid,:]-pos[0:2]\n Cdiff = Apos-pos[0:2]\n Cdiff = np.sqrt(np.square(Cdiff[:,0])+np.square(Cdiff[:,1]))\n Cdiff = np.sum(Cdiff)\n Ddiff = self.P_Des-self.P_Des[self.id]\n Ddiff = np.sqrt(np.square(Ddiff[:, 0]) + np.square(Ddiff[:, 1]))\n Ddiff = np.sum(Ddiff)\n Tdiff = np.abs(Ddiff - Cdiff)\n\n\n\n\n # region Obstacles\n Obc = Apos\n # Obc = self.Obstacles\n # Obc = np.vstack([Obs,pos[0:2]])\n Diff = pos[0:2] - Obc\n for m in range(0, Diff.shape[0]):\n if (np.sqrt(np.square(Diff[m, 0]) + np.square(Diff[m, 1]))) > 0.35:\n Diff[m, :] = np.array([0, 0])\n\n DiffY = Diff[:, 1].reshape([1, -1])\n DiffX = Diff[:, 0].reshape([1, -1])\n x_odot = np.sum(np.exp(-np.square(DiffX) / self.Var) * DiffX)\n y_odot = np.sum(np.exp(-np.square(DiffY) / self.Var) * DiffY)\n\n ObsAv 
= np.array([x_odot, y_odot])\n # endregion\n\n\n NewGd = np.square(np.transpose(self.E) @ Apos)\n NewGd = (NewGd[:, 0] + NewGd[:, 1]).reshape([-1, 1])\n G = self.Gdsq - NewGd\n Rg = self.DistJacobian(Apos, self.Edges)\n p_ddot = np.zeros(([6, 2]))\n\n if (Tdiff < self.Thr):\n self.StartTimer = True\n\n\n if(self.StartTimer):\n self.Timer += 1\n if (self.Timer > 500+self.OffTimer):\n self.FormStable = True\n self.StartTimer = False\n self.Timer = 0\n\n if(self.Tid > 3 and np.sum(TarM[self.id, 0])<5):\n TarM[self.id, 0] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) < 5):\n TarM[self.id, 1] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n\n if (self.Tid > 3 and np.sum(TarM[self.id, :]) < 0.01):\n self.Tid +=1\n\n if (self.FormStable):\n # Formation Done\n if self.Tid == 0 and self.Formation == \"square\":\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n # self.Tid += 1\n # self.FormStable = False\n print(self.P_Des, self.Formation, \" \", self.Tid)\n # self.K1 = 5\n # self.K2 = 50\n if (self.Tid < self.TargetP.shape[0]-1 and self.FormStable):\n self.Tid += 1\n if(self.Tid == 1):\n self.K1 = 2\n self.K3 = 10\n self.Thr = 0.001\n if (self.Tid == 2):\n self.K1 = 20\n self.K3 = 1\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n self.FormStable = False\n # Linear Control Law\n p_dot = np.zeros([6,2])\n p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # Non - linear Control Law\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_dot += p_ddot*self.dt\n if(self.id == 0):\n # print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n p_dot = self.K3 * TarM\n if (self.id == 0):\n print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n # if(self.Tid == 1):\n # p_dot += -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n\n dx = p_dot[self.id, 0]\n dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n #else:\n # TarM[self.id, :] = Tdiff\n # # Linear Control\n # p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n # p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # dx = p_dot[self.id, 0]\n # dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += self.K3 * TarM\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n\n # region Robot Wheel Control\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n\n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])\n # endregion", "def update_world(mv_command):\n global bot_position, scan\n\n MoveRobot(mv_command)\n # ws.stepPhysics(steps=1)\n if checkAtFinalTime():\n return None\n scan_data = 
scan.getLeftCenterRightScanState()\n\n return scan_data", "def update_positions(request):\n raw_data = request.raw_post_data\n logging.info(\"isrproxy update_positions: %s\" % raw_data)\n update_positions_data = simplejson.loads(raw_data)\n\n # redirect update to dev server in production environment\n if not settings.DEV:\n dev_positions = filter(lambda p: p.get(\"Operator_ID\") == WB_DEV_OPERATOR_ID, update_positions_data)\n if dev_positions:\n deferred.defer(safe_fetch, url=\"http://dev.latest.waybetter-app.appspot.com/fleet/isrproxy/update/positions/\", payload=simplejson.dumps(dev_positions), method=POST, notify=False)\n update_positions_data = filter(lambda p: p.get(\"Operator_ID\") != WB_DEV_OPERATOR_ID, update_positions_data)\n\n\n ride_positions = []\n for rp_data in update_positions_data:\n station_id = rp_data.get(\"Operator_ID\")\n taxi_id = rp_data.get(\"Vehicle_ID\")\n lat = rp_data.get(\"Lat\")\n lon = rp_data.get(\"Lon\")\n timestamp = rp_data.get(\"Timestamp\")\n\n ride_uuid = ISRProxy.get_taxi_assignment(taxi_id, station_id)\n\n if all([station_id, ride_uuid, taxi_id, lat, lon, timestamp]):\n timestamp = dateutil_parser.parse(timestamp)\n timestamp = normalize_naive_israel_dt(timestamp)\n ride_positions.append(TaxiRidePosition(station_id, taxi_id, ride_uuid, lat, lon, timestamp))\n\n fleet_manager.update_positions(ride_positions)\n return HttpResponse(\"OK\")", "def update_offboard_command(self, cmd_vel_msg):\n self.onboard_controller.desired_roll = cmd_vel_msg.linear.x\n self.onboard_controller.desired_pitch = cmd_vel_msg.linear.y\n self.onboard_controller.desired_yaw_rate = cmd_vel_msg.angular.z\n self.onboard_controller.desired_climb_rate = cmd_vel_msg.linear.z", "def advance(distance, angle, da):\n cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)\n # How fast will we update the robot's movement?\n rate = 10\n # Set the equivalent ROS rate variable\n r = rospy.Rate(rate)\n # Set the forward linear speed to 0.2 meters per second\n if distance >= 0.0:\n linear_speed = 0.5\n else:\n linear_speed = -0.5\n # Set the travel distance in meters\n goal_distance = abs(distance)\n # Set the rotation speed in radians per second\n if angle < 0.0:\n angular_speed = -0.5\n else:\n angular_speed = 0.5\n # Set the angular tolerance in degrees converted to radians\n angular_tolerance = radians(0.5)\n # Set the rotation angle to angle in radians \n goal_angle = angle\n # Initialize the tf listener\n tf_listener = tf.TransformListener()\n # Give tf some time to fill its buffer\n rospy.sleep(2)\n # Set the map frame\n map_frame = '/map'\n # Set the odom frame\n odom_frame = '/odom'\n \"\"\" Find out if the robot uses /map->/odom transform \"\"\"\n try:\n tf_listener.waitForTransform(map_frame, odom_frame, rospy.Time(), rospy.Duration(1.0))\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /map and /odom\")\n rospy.signal_shutdown(\"tf Exception\") \n # Find out if the robot uses /base_link or /base_footprint\n try:\n tf_listener.waitForTransform(odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_footprint'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n try:\n tf_listener.waitForTransform(odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_link'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /odom and /base_link or /base_footprint\")\n 
rospy.signal_shutdown(\"tf Exception\") \n # Initialize the position variable as a Point type\n position = Point() \n # Initialize the movement command\n move_cmd = Twist()\n \n\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n # Keep track of the distance traveled\n dist = 0.0\n #pdb.set_trace()\n if da:\n print bcolors.OKGREEN + \"da True\" + bcolors.ENDC\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC\n # Set the movement command to forward motion\n move_cmd.linear.x = linear_speed\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n #pdb.set_trace()\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n \n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n else:\n print bcolors.OKGREEN + \"da False\" + bcolors.ENDC\n #pdb.set_trace()\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n 
# Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n# print \"x\", position.x\n# print \"y\", position.y\n# print \"la\", last_angle\n# print \"ta\", degrees(turn_angle)\n# print \"\\n\"\n #raw_input(\"Press ENTER to continue ...\")\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next movement\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC \n #pdb.set_trace()\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n move_cmd.linear.x = linear_speed\n # Keep track of the distance traveled\n dist = 0.0\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n\n # Stop the robot for good\n cmd_vel_pub.publish(Twist())\n rospy.sleep(1)\n\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n return (position, rotation)", "def move_base(self, x, y, z):\n # fill ROS message\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n traj = trajectory_msgs.msg.JointTrajectory()\n traj.joint_names = [\"odom_x\", \"odom_y\", \"odom_t\"]\n p = trajectory_msgs.msg.JointTrajectoryPoint()\n p.positions = [x, y, z]\n p.velocities = [0, 0, 0]\n p.time_from_start = rospy.Time(15)\n traj.points = [p]\n goal.trajectory = traj\n\n # send message to the action server\n self.cli.send_goal(goal)\n\n # wait for the action server to complete the order\n self.cli.wait_for_result()\n return self._running", "def odometry_callback(self, msg):\n if not bool(self.config):\n return\n\n linear = msg.twist.twist.linear\n angular = msg.twist.twist.angular\n v_linear = numpy.array([linear.x, linear.y, linear.z])\n v_angular = numpy.array([angular.x, angular.y, angular.z])\n\n if self.config['odom_vel_in_world']:\n # This is a temp. 
workaround for gazebo's pos3d plugin not behaving properly:\n # Twist should be provided wrt child_frame, gazebo provides it wrt world frame\n # see http://docs.ros.org/api/nav_msgs/html/msg/Odometry.html\n xyzw_array = lambda o: numpy.array([o.x, o.y, o.z, o.w])\n q_wb = xyzw_array(msg.pose.pose.orientation)\n R_bw = transf.quaternion_matrix(q_wb)[0:3, 0:3].transpose()\n\n v_linear = R_bw.dot(v_linear)\n v_angular = R_bw.dot(v_angular)\n \n # Compute compute control output:\n t = time_in_float_sec_from_msg(msg.header.stamp)\n \n e_v_linear = (self.v_linear_des - v_linear)\n e_v_angular = (self.v_angular_des - v_angular)\n \n a_linear = self.pid_linear.regulate(e_v_linear, t)\n a_angular = self.pid_angular.regulate(e_v_angular, t)\n\n # Convert and publish accel. command:\n cmd_accel = geometry_msgs.Accel()\n cmd_accel.linear = geometry_msgs.Vector3(x=a_linear[0], y=a_linear[1], z=a_linear[2])\n cmd_accel.angular = geometry_msgs.Vector3(x=a_angular[0], y=a_angular[1], z=a_angular[2])\n self.pub_cmd_accel.publish(cmd_accel)", "def drive_vehicle(x_offset, y_offset, run_cmd, width, front_distance_sensor_1, front_distance_sensor_2):\n # Initialise the PCA9685 using the default address (0x40).\n pwm = Adafruit_PCA9685.PCA9685()\n\n # set number of pins for direction of drives\n left_fwd_pin_1 = 4\n left_fwd_pin_2 = 17\n left_bwd_pin_1 = 18\n left_bwd_pin_2 = 23\n\n right_fwd_pin_1 = 22\n right_fwd_pin_2 = 27\n right_bwd_pin_1 = 24\n right_bwd_pin_2 = 25\n\n GPIO.setup(left_fwd_pin_1, GPIO.OUT) # left forward 1 pin\n GPIO.setup(left_fwd_pin_2, GPIO.OUT) # left forward 2 pin\n GPIO.setup(left_bwd_pin_1, GPIO.OUT) # left backward 1 pin\n GPIO.setup(left_bwd_pin_2, GPIO.OUT) # left backward 2 pin\n\n GPIO.setup(right_fwd_pin_1, GPIO.OUT) # right forward 1 pin\n GPIO.setup(right_fwd_pin_2, GPIO.OUT) # right forward 2 pin\n GPIO.setup(right_bwd_pin_1, GPIO.OUT) # right backward 1 pin\n GPIO.setup(right_bwd_pin_2, GPIO.OUT) # right backward 2 pin\n\n left_fwd = True\n left_bwd = False\n\n right_fwd = True\n right_bwd = False\n\n last_left = False\n\n while True:\n try:\n # Take shortest distance measured by ultrasound\n if front_distance_sensor_1.value < front_distance_sensor_2.value:\n front_distance = front_distance_sensor_1.value\n else:\n front_distance = front_distance_sensor_2.value\n\n if front_distance < 5 or width.value > 450:\n # if we are facing some obstacle or object we are looking for is close > stop\n left_speed = 0\n right_speed = 0\n left_fwd = left_bwd = right_fwd = right_bwd = False\n else:\n right_fwd = True\n if x_offset.value == -10:\n if last_left:\n # no object is detected by camera\n left_speed = 0 # 0.6 * max_speed\n # left_fwd = False\n # left_bwd = True\n right_speed = 0.9 * max_speed\n else:\n left_speed = 0.9 * max_speed\n right_speed = 0\n elif -5 < x_offset.value < 0:\n # object is in left part of the screen\n left_speed = pow(abs(x_offset.value), 2) * max_speed\n right_speed = max_speed\n left_fwd = True\n left_bwd = False\n last_left = True\n elif x_offset.value > 0:\n # object is in right part of the screen\n left_speed = max_speed\n right_speed = pow(x_offset.value, 2) * max_speed\n left_fwd = True\n left_bwd = False\n last_left = False\n else:\n # object is in the middle\n left_speed = max_speed\n right_speed = max_speed\n left_fwd = True\n left_bwd = False\n \n print('Speeds: Left {} Right {} Run {}'.format(left_speed, right_speed, run_cmd.value))\n\n # Right drives\n pwm.set_pwm(0, 0, int(right_speed*run_cmd.value))\n pwm.set_pwm(1, 0, 
int(right_speed*run_cmd.value))\n GPIO.output(left_fwd_pin_1, left_fwd)\n GPIO.output(left_fwd_pin_2, left_fwd)\n GPIO.output(left_bwd_pin_1, left_bwd)\n GPIO.output(left_bwd_pin_2, left_bwd)\n\n # Left drives\n pwm.set_pwm(4, 0, int(left_speed*run_cmd.value))\n pwm.set_pwm(5, 0, int(left_speed*run_cmd.value))\n GPIO.output(right_fwd_pin_1, right_fwd)\n GPIO.output(right_fwd_pin_2, right_fwd)\n GPIO.output(right_bwd_pin_1, right_bwd)\n GPIO.output(right_bwd_pin_2, right_bwd)\n except KeyboardInterrupt:\n # Stop robot after keyboard interrupt\n GPIO.output(left_fwd_pin_1, False)\n GPIO.output(left_fwd_pin_2, False)\n GPIO.output(left_bwd_pin_1, False)\n GPIO.output(left_bwd_pin_2, False)\n GPIO.output(right_fwd_pin_1, False)\n GPIO.output(right_fwd_pin_2, False)\n GPIO.output(right_bwd_pin_1, False)\n GPIO.output(right_bwd_pin_2, False)\n GPIO.cleanup()", "def pos(self, irc, msg, args, channel, system):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n if system:\n try:\n locationID = self._get_locationID(system)\n solar_system = self._get_SolarSystem(locationID)\n except:\n irc.error('Unknown location')\n return\n\n rows = self._sql(\"\"\"\n SELECT *\n FROM corporation_starbase\n WHERE owner_id = %s AND \"locationID\" = %s\"\"\", [self.corporationID,\n locationID], single=False)\n else:\n rows = self._sql(\"\"\"\n SELECT *\n FROM corporation_starbase\n WHERE owner_id = %s\n ORDER BY \"locationID\", \"moonID\" \"\"\", [self.corporationID], single=False)\n count = len(rows)\n\n STATES = {\n 0: \tircutils.mircColor('Unanchored', fg='teal'), # Also unanchoring? Has valid stateTimestamp.\n # Note that moonID is zero for unanchored Towers, but\n # locationID will still yield the solar system ID.\n 1: \tircutils.mircColor('Anchored/Offline', fg='orange'), # No time information stored.\n 2: \tircutils.mircColor('Onlining', fg='light green'), \t # Will be online at time = onlineTimestamp.\n 3: \tircutils.mircColor('Reinforced', fg='red'), # Until time = stateTimestamp.\n 4: \tircutils.mircColor('Online', fg='green') \t # Continuously since time = onlineTimestamp.\n }\n locations = {}\n if system:\n locations[solar_system['solarSystemID']] = solar_system\n irc.reply('Found {0} starbases in {1}'.format(\n ircutils.bold(count),\n self._colorize_system(solar_system)),\n prefixNick=False)\n else:\n irc.reply('Found {0} starbases'.format(count), prefixNick=False)\n\n for row in rows:\n locationID = int(row['locationID'])\n try:\n state = STATES[int(row['state'])]\n except:\n state = 'Unknown'\n if not locationID in locations:\n try:\n solar_system = self._get_SolarSystem(locationID)\n locations[locationID] = solar_system\n except UnknownLocation:\n irc.reply('{0} :: {1} :: {2} :: {3} :: {4}'.format(\n 'Unknown region',\n 'Unknown solarsystem {0}'.format(locationID), #solarsystem\n 'n/a', #moon\n self._get_type(int(row['typeID']))['typeName'], #pos type\n state #offline/online\n ), prefixNick=False)\n continue\n else:\n solar_system = locations[locationID]\n\n if not solar_system['regionID'] in locations:\n region = self._get_location(solar_system['regionID'])\n locations[solar_system['regionID']] = region\n else:\n region = locations[solar_system['regionID']]\n\n\n irc.reply('{0} :: {1} :: {2} :: {3} :: {4}'.format(\n region['itemName'],\n self._colorize_system(solar_system), #solarsystem\n self._get_location(row['moonID'])['itemName'], #moon\n self._get_type(int(row['typeID']))['typeName'], #pos type\n state #offline/online\n ), 
prefixNick=False)", "def move(solarsystem, asteroids, dt):\n calc_force(solarsystem, solarsystem, dt)\n calc_force(asteroids, solarsystem, dt)\n solarsystem['x'] += solarsystem['vx'] * dt\n solarsystem['y'] += solarsystem['vy'] * dt\n solarsystem['z'] += solarsystem['vz'] * dt\n\n asteroids['x'] += asteroids['vx'] * dt\n asteroids['y'] += asteroids['vy'] * dt\n asteroids['z'] += asteroids['vz'] * dt", "def move(self, usercmd):\n newPosX = self.robot.posX\n newPosY = self.robot.posY\n logging.info(\"Avant action :: newPosX={} / newPosY={}\".\\\n format(newPosX, newPosY))\n step = 1\n cmd = usercmd[0:1]\n if (len(usercmd) != 1):\n stpStr = usercmd[1:]\n if (stpStr.isdigit()):\n step = int(stpStr)\n else:\n step = 0\n if cmd.startswith(\"E\"):\n newPosX = newPosX + step\n elif cmd.startswith(\"W\"):\n newPosX = newPosX - step\n elif cmd.startswith(\"N\"):\n newPosY = newPosY - step\n elif cmd.startswith(\"S\"):\n newPosY = newPosY + step\n elif (cmd == \"Q\"):\n #quit\n print(\"Quit\")\n return False\n logging.info(\"newPosX={} / newPosY={}\".format(newPosX, newPosY))\n oldCar = \"\"\n newCar = \"\"\n if (self.canMove(cmd, self.robot, newPosX, newPosY)):\n oldCar = self.map[newPosY][newPosX]\n logging.info(\"originalMap[{}] : {}\".format(self.robot.posY, \\\n self.originalMap[self.robot.posY]))\n if (self.originalMap[self.robot.posY][self.robot.posX] == \".\"):\n self.map[self.robot.posY][self.robot.posX] = \".\"\n else:\n self.map[self.robot.posY][self.robot.posX] = \" \"\n self.robot.posX = newPosX\n self.robot.posY = newPosY\n self.map[newPosY][newPosX] = \"X\"\n logging.info(\"self.map[{}]={}\".format(newPosY, self.map[newPosY]))\n newCar = self.map[newPosY][newPosX]\n #print(oldCar, newCar)\n if (oldCar == \"U\" and newCar == \"X\"):\n print(\"Bravo, vous avez gagné !!!!!\")\n #Quit\n return False\n return True", "def move_robot(self, pose):\n # type: (Pose) -> None\n start_pos = ModelState()\n start_pos.model_name = 'turtlebot3'\n start_pos.pose = pose\n rospy.wait_for_service('/gazebo/set_model_state')\n try:\n set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\n resp = set_state(start_pos)\n\n except rospy.ServiceException:\n print(\"Move Robot to position failed\")\n\n pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size = 10)\n rospy.sleep(3)\n start_pos = PoseWithCovarianceStamped()\n start_pos.header.frame_id = 'map'\n start_pos.pose.pose = pose \n pub.publish(start_pos)", "def updateCoordinates():\n\n lat = request.args.get(\"lat\")\n lng = request.args.get(\"lng\")\n\n print(\"New coordinates\")\n print(\"Latitude: \" + lat)\n print(\"Longitude: \" + lng)\n\n if session[\"user_type\"] == \"Operator\":\n # for testing purposes, we'll record all position changes\n db.execute(\"\"\"INSERT OR REPLACE\n INTO active_trucks (\n lat, lng, operatorid ) \n VALUES (?,?,?);\"\"\", \\\n (lat,lng,session[\"user_id\"],))\n conn.commit()\n\n return \"True\"", "def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x + self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + self.lbox\n elif newX > 
self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ", "def test_vw_controller(self):\n pass\n\n yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, -math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()", "def run(self):\n \n # read sensor value (actual value)\n x = self.ORION_CB.get_entity_attribute_value(entity_name=self.params['sensor_entity_name'],\n attribute_name=self.params['sensor_attrs'])\n # set 0 if empty\n if x == '\" \"':\n x = '0'\n # convert to float\n self.x_act = float(x) \n # calculate PID output\n self.y = self.PID.run(x_act = self.x_act, x_set = self.params['setpoint'])\n # send post command\n self.ORION_CB.post_cmd_v1(self.params['actuator_entity_name'], \n self.params['actuator_type'],\n self.params['actuator_command'], round(self.y,3))", "async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for 
testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()", "def cmdVelCallback(self, req):\n x = req.linear.x # m/s\n th = req.angular.z # rad/s\n\n if x == 0:\n # Turn in place\n right = th * self.wheel_track * self.gear_reduction / 2.0\n left = -right\n elif th == 0: \n # Pure forward/backward motion\n left = right = x\n else:\n # Rotation about a point in space\n left = x - th * self.wheel_track * self.gear_reduction / 2.0\n right = x + th * self.wheel_track * self.gear_reduction / 2.0\n\n # Set motor speeds in meters per second.\n self.mySerializer.mogo_m_per_s([1, 2], [left, right])", "def update(self):\n self._spots[constants.ROAD_LENGTH - 1].remove_car(0)\n self._spots[0].remove_car(1)\n\n for i in xrange(constants.ROAD_LENGTH - 1):\n self._num_queued += (self._spots[constants.ROAD_LENGTH - i - 2].\n update_spot(self._spots[constants.ROAD_LENGTH - i - 1], 0))\n self._num_queued += self._spots[i + 1].update_spot(\n self._spots[i], 1)\n\n self.create_car(self._distribution)\n self._steps += 1", "def move(self):\n t = self.get_time_step() # Get the timestep\n\n for body in self.bodies:\n body.update_pos_revs(t, self.time)\n\n # Once all the positions are updated, update all velocities and\n # accelerations.\n for body in self.bodies:\n body.update_vel_acc(t, self.bodies)\n\n self.get_energies() # Get the total energy\n self.time += t # Increase the time by time step.", "def run(self):\n while not rospy.is_shutdown():\n if self.state is not None:\n # get robot state\n x = self.state.pose.position.x\n y = self.state.pose.position.y\n quat = [\n self.state.pose.orientation.x,\n self.state.pose.orientation.y,\n self.state.pose.orientation.z,\n self.state.pose.orientation.w\n ]\n eulers = tf.transformations.euler_from_quaternion(quat, 'sxyz')\n # generate ping and send\n ping, heading = self.sim.gen_ping((x, y), eulers[2])\n self.__socket.send(self.__create_msg(ping))\n rospy.logdebug(\n \"Sent ping with heading: \" + str(np.rad2deg(heading)))\n self.rate.sleep()", "def update():\n # TODO: Follow the wall to the right of the car without hitting anything.\n global DIST, RIGHT_TOP_WINDOW, LEFT_TOP_WINDOW, RIGHT_WINDOW, LEFT_WINDOW, FRONT_WINDOW, REAR_WINDOW\n global cur_state\n \n scan = rc.lidar.get_samples()\n scan = (scan - 0.01) % 100000\n\n speed = 1\n angle = 0\n\n _, right_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_WINDOW)\n _, left_dist = rc_utils.get_lidar_closest_point(scan, LEFT_WINDOW)\n _, right_top_dist = rc_utils.get_lidar_closest_point(scan, RIGHT_TOP_WINDOW)\n _, left_top_dist = rc_utils.get_lidar_closest_point(scan, LEFT_TOP_WINDOW)\n _, front_dist = rc_utils.get_lidar_closest_point(scan, FRONT_WINDOW)\n _, rear_dist = rc_utils.get_lidar_closest_point(scan, REAR_WINDOW)\n\n \n if cur_state == State.drive:\n if right_top_dist > left_top_dist:\n angle = angle_controller(right_top_dist, 1) \n else:\n angle = angle_controller(left_top_dist, -1)\n\n if abs(angle) > 0.75:\n kP = 2\n speed = 1 / (abs(angle) * kP)\n speed = rc_utils.clamp(speed, -1, 1)\n\n rc.drive.set_speed_angle(speed, angle)", "def send_lsp_update(lsp_name, new_path):\n 
print(\"Updating \", lsp_name, \"on NorthStar Controller\")\n requs = requests.get(\n 'https://' + server_ip +\n ':8443/NorthStar/API/v1/tenant/1/topology/1/te-lsps/',\n headers=auth_header, verify=False)\n dump = json.dumps(requs.json())\n lsp_list = json.loads(dump)\n # Find target LSP to use lspIndex\n for lsp in lsp_list:\n if lsp['name'] == lsp_name:\n break\n # Fill only the required fields\n # ero = ero_input\n ero = []\n\n # Build new ERO Data\n\n print lsp\n for ip_address in new_path:\n hop = {\n \"topoObjectType\": \"ipv4\",\n \"address\": ip_address,\n # \"loose\" : True,\n }\n ero.append(hop)\n new_lsp = {}\n# \"provisioningType\":\"SR\"\n for key in ('from', 'to', 'name', 'lspIndex', 'pathType', 'provisioningType'):\n new_lsp[key] = lsp[key]\n\n new_lsp['plannedProperties'] = {\n \"bandwidth\": \"100M\",\n 'ero': ero\n # 'calculatedEro' : []\n #'preferredEro' : ero\n }\n response = requests.put(\n 'https://10.10.2.64:8443/NorthStar/API/v1/tenant/1/topology/1/te-lsps/' + str(new_lsp[\n 'lspIndex']),\n json=new_lsp, headers=auth_header, verify=False)\n print(\"LSP Updated on NorthStar Controller\")\n print response", "def go_near(furniture_name, robot_teleport):\n if furniture_name == \"livingroom_coffeetable\":\n print(\"Request to put robot at livingroom_coffeetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.5, \"y\": 7.3, \"z\": 0, \"yaw\": 3.8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_chest\":\n print(\"Request to put robot at bedroom_chest.\")\n x_y_z_yaw_pitch_roll = {\"x\": 5, \"y\": 11.3, \"z\": 0, \"yaw\": 0.0, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_console\":\n print(\"Request to put robot at bedroom_console.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.2, \"y\": 12.2, \"z\": 0, \"yaw\": math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_bedsidetable\":\n print(\"Request to put robot at bedroom_bedsidetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 3.1, \"y\": 12.1, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_shelf\":\n print(\"Request to put robot at bedroom_shelf.\")\n x_y_z_yaw_pitch_roll = {\"x\": 2.4, \"y\": 9.8, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_cupboard\":\n print(\"Request to put robot at kitchen_cupboard.\")\n x_y_z_yaw_pitch_roll = {\"x\": 6.7, \"y\": 10.6, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_table\":\n print(\"Request to put robot at kitchen_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.8, \"y\": 10.2, \"z\": 0, \"yaw\": math.pi/8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"livingroom_table\":\n print(\"Request to put robot at livingroom_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.4, \"y\": 7.6, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n else:\n print(\"Unknown furniture: \" + furniture_name)", "def go_to_start():\n global ID_R, POS_R, VEL_R, I_R\n\n motor_commands = np.array([[1, 0, 0.0],\n [2, 0, 0.0],\n [3, 0, 0.0],\n [4, 0, 0.0]])\n\n rospy.loginfo(\"Moving to start position\")\n\n return motor_commands", "def OrbitPos(self, rv, t, m):\n \n params = 
np.array(rv)\n params = params.flatten()\n \n def GravityODE(rv,t):\n G = 6.67e-11\n m = 5.972e24\n x = rv[0]\n y = rv[1]\n vx = rv[2]\n vy = rv[3]\n \n dvydt = -((G*m*y)/((x**2+y**2)**(3/2)))\n dvxdt = -((G*m*x)/((x**2+y**2)**(3/2)))\n dxdt = vx\n dydt = vy\n\n pos_derivs = np.array([dxdt,dydt])\n v_deriv = np.array([dvxdt,dvydt])\n derivs = np.hstack((pos_derivs,v_deriv))\n \n return derivs \n \n satellite_orbit = integrate.odeint(GravityODE,params,t)\n \n return satellite_orbit[:,0],satellite_orbit[:,1]", "async def update(self):\n \n logging.info('updating state...')\n info = await self.send_command(\"$dat\", \"upd01-\")\n if not info:\n msg = \"Unable to get data about windows and scenes from Gateway\"\n return msg\n\n logging.debug('hub response is :')\n logging.debug(info)\n prefix = None\n lines = re.split(r'[\\n\\r]+', info)\n\n for line in lines:\n line = line.strip()\n if not prefix:\n prefix = line[:2]\n elif not line.startswith(prefix):\n continue\n else:\n line = line[2:]\n\n if line.startswith(\"$cr\"):\n # name of room\n room_id = line[3:5]\n room_name = line.split('-')[-1].strip()\n if(not room_name in self.rooms):\n logging.debug('creating room '+room_name)\n self.rooms[room_name] = HunterDouglasPlatinumRoom(hub=self, name=room_name, id=int(room_id))\n elif line.startswith(\"$cm\"):\n # name of scene\n scene_id = line[3:5]\n scene_name = line.split('-')[-1].strip()\n if(not scene_name in self.scenes):\n logging.debug('creating scene '+scene_name)\n self.scenes[scene_name] = HunterDouglasPlatinumScene(hub=self, name=scene_name, id=int(scene_id))\n elif line.startswith(\"$cs\"):\n # name of a shade\n parts = line.split('-')\n shade_id = line[3:5]\n shade_name = parts[-1].strip()\n room_id = parts[1]\n if(not shade_name in self.shades):\n logging.debug('creating shade '+shade_name)\n self.shades[shade_name] = HunterDouglasPlatinumShade(hub=self, name=shade_name, id=int(shade_id), room=int(room_id))\n elif line.startswith(\"$cp\"):\n # state of a shade\n shade_id = line[3:5]\n state = line[-4:-1]\n state = int(state)\n shade = self.get_shade(id=int(shade_id))\n logging.debug('updating shade state for shade '+shade_id+' to '+str(state)+' for shade '+str(shade))\n if shade:\n shade.set_state(state)\n return \"\"", "def send_ned_velocity(vehicle,velocity_x, velocity_y, velocity_z, duration):\n msg = vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mav.mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n 0b0000111111000111, # type_mask (only speeds enabled)\n 0, 0, 0, # x, y, z positions (not used)\n velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n\n\n # send command to vehicle on 1 Hz cycle\n for x in range(0,duration):\n vehicle.send_mavlink(msg)\n time.sleep(1)", "def robotCode(self):\n vRobot = v2PosB(self.getRobotConf(self.bot), self.ballEngine.getBallPose(),self.vmax)\n self.setMotorVelocities(vRobot[0], vRobot[1])", "def __init__(self):\n \n # Publishers\n self.pub_vel_prop = rospy.Publisher('/aer1217_ardrone/vel_prop', \n MotorCommands, queue_size=300)\n \n self.model_name = 'ARDroneCarre'\n \n self.pub_vicon_data = rospy.Publisher('/vicon/{0}/{0}'.format(\n self.model_name),\n TransformStamped, queue_size=30)\n\n \n # Subscribers\n self.sub_gazebo_pose = rospy.Subscriber('/aer1217_ardrone/gazebo_state', \n GazeboState,\n 
self.update_quadrotor_state)\n \n self.sub_cmd_vel = rospy.Subscriber('cmd_vel_RHC', \n Twist,\n self.update_offboard_command)\n \n \n # Initialize messages for publishing\n self.vel_prop_msg = MotorCommands()\n self.quadrotor_state = TransformStamped()\n \n # Run the onboard controller at 200 Hz\n self.onboard_loop_frequency = 200.\n \n # Create an onboard controller for calculation of the motor commands\n self.onboard_controller = ARDroneOnboardController()\n \n # Run this ROS node at the onboard loop frequency\n self.pub_prop_vel = rospy.Timer(rospy.Duration(1. / \n self.onboard_loop_frequency), self.update_motor_speeds)\n \n # Keep time for differentiation and integration within the controller\n self.old_time = rospy.get_time()", "def _broadcast_odometry_info(self, msg):\n # If we got this far, we can assume that the Propeller board is initialized and the motors should be on.\n # The _switch_motors() function will deal with the _SafeToOperate issue\n if not self._motorsOn:\n self._switch_motors(True)\n\n x = msg.x\n y = msg.y\n # 3 is odom based heading and 4 is gyro based\n theta = msg.theta # On ArloBot odometry derived heading works best.\n alternate_theta = msg.alternate_theta\n\n vx = msg.vx\n omega = msg.omega\n\n quaternion = Quaternion()\n quaternion.x = 0.0\n quaternion.y = 0.0\n quaternion.z = sin(theta / 2.0)\n quaternion.w = cos(theta / 2.0)\n\n ros_now = rospy.Time.now()\n\n # First, we'll publish the transform from frame odom to frame base_link over tf\n # Note that sendTransform requires that 'to' is passed in before 'from' while\n # the TransformListener' lookupTransform function expects 'from' first followed by 'to'.\n # This transform conflicts with transforms built into the Turtle stack\n # http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20broadcaster%20%28Python%29\n # This is done in/with the robot_pose_ekf because it can integrate IMU/gyro data\n # using an \"extended Kalman filter\"\n # REMOVE this \"line\" if you use robot_pose_ekf\n self._OdometryTransformBroadcaster.sendTransform(\n (x, y, 0),\n (quaternion.x, quaternion.y, quaternion.z, quaternion.w),\n ros_now,\n \"base_footprint\",\n \"odom\"\n )\n\n # next, we will publish the odometry message over ROS\n odometry = Odometry()\n odometry.header.frame_id = \"odom\"\n odometry.header.stamp = ros_now\n odometry.pose.pose.position.x = x\n odometry.pose.pose.position.y = y\n odometry.pose.pose.position.z = 0\n odometry.pose.pose.orientation = quaternion\n\n odometry.child_frame_id = \"base_link\"\n odometry.twist.twist.linear.x = vx\n odometry.twist.twist.linear.y = 0\n odometry.twist.twist.angular.z = omega\n\n # Save last X, Y and Heading for reuse if we have to reset:\n self.lastX = x\n self.lastY = y\n self.lastHeading = theta\n self.alternate_heading = alternate_theta\n\n # robot_pose_ekf needs these covariances and we may need to adjust them.\n # From: ~/turtlebot/src/turtlebot_create/create_node/src/create_node/covariances.py\n # However, this is not needed because we are not using robot_pose_ekf\n # odometry.pose.covariance = [1e-3, 0, 0, 0, 0, 0,\n # 0, 1e-3, 0, 0, 0, 0,\n # 0, 0, 1e6, 0, 0, 0,\n # 0, 0, 0, 1e6, 0, 0,\n # 0, 0, 0, 0, 1e6, 0,\n # 0, 0, 0, 0, 0, 1e3]\n #\n # odometry.twist.covariance = [1e-3, 0, 0, 0, 0, 0,\n # 0, 1e-3, 0, 0, 0, 0,\n # 0, 0, 1e6, 0, 0, 0,\n # 0, 0, 0, 1e6, 0, 0,\n # 0, 0, 0, 0, 1e6, 0,\n # 0, 0, 0, 0, 0, 1e3]\n\n self._OdometryPublisher.publish(odometry)", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not 
rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n print( \"getMotorStats : \", robot.getMotorStats() )\n print( \"getEncoders : \", robot.getEncoders( 1 ) )\n print( \"getStall : \", robot.getStall() )\n print( \"setMotors 100, -100 : \", robot.setMotors( 100, -100) )\n time.sleep( 3.0 )\n print( \"setMotors -100, 100 : \", robot.setMotors( -100, 100) )\n time.sleep( 3.0 )\n print( \"setMotorsOff : \", robot.setMotorsOff() )\n\n robot.close()", "def update_motor_target(data):\n print('sending new motor target')\n slider_target = json.dumps({\"id\" : \"Motor1\", \"target\": data})\n SERIAL_PARENT.send(slider_target)\n OUTGOING.append(slider_target)", "def execute(self, sim):\n\t\tfor i in self.vehicle.neighbors:\n\t\t\tneighbor = sim.getCar(i) #take the car object\t\n\n\t\t\t# If needed update GUI\n\t\t\tif not sim.no_graphics:\n\t\t\t\tif neighbor.state == car.State.VULNERABLE:\n\t\t\t\t\tvisualGraph.visualInfect(self.vehicle, 
neighbor)\n\n\t\t\tneighbor.on_receive(self.msg)\n\n\t\t# If using GUI sleep a bit\n\t\tif not sim.no_graphics:\n\t\t\ttime.sleep(0.01)", "def send_robot_cmd(self, command, *args):\n \n \n if self.robot_commander is None:\n self.start_robot_publisher()\n time.sleep(.5)\n\n # choose which platform\n #if GlobalSettings.USE_TEGA:\n msg = TegaBehaviors.get_msg_from_behavior(command, args)\n #else:\n # msg = JiboBehaviors.get_msg_from_behavior(command, args)\n\n # add header\n self.robot_commander.publish(msg) # would be nice to guarantee message performance here\n #rospy.loginfo(msg)", "def update(self):\n if self.dir == \"r\":\n self.vx = 10\n self.vy = 0\n elif self.dir == \"l\":\n self.vx = -10\n self.vy = 0\n elif self.dir == \"u\":\n self.vx = 0\n self.vy = -10\n elif self.dir == \"d\":\n self.vx = 0\n self.vy = 10\n elif self.dir == \"None\":\n self.vx = 0\n self.vy = 0\n self.x += self.vx\n self.y += self.vy", "def execute(self):\n self.status_message = \"State: Execute - Executing Motion Plan\"\n self.current_state = \"execute\"\n self.next_state = \"idle\"\n collect_info = threading.Thread(\n target=self.write_joint_pos, args=(\"notp_wp\",))\n collect_info.start()\n for wp in self.waypoints:\n # Ensure the correct number of joint angles\n full_wp = [0.0] * self.rexarm.num_joints\n full_wp[0:len(wp)] = wp\n # TODO: Send the waypoints to the trajectory planner and break if estop\n if self.next_state == \"estop\":\n break\n self.rexarm.set_positions(full_wp)\n time.sleep(1.5)", "def setPos(self,pos,c='cartesian'):\n\t\t#self.velocities={'angMachine':11.8*2*pi/60., 'angCranes': 15*pi/180, 'radial': 1} #radians/sec and m/s\n\t\ttraveltime=0\n\t\tif pos != self.pos:\n\t\t\tself.lastPos=self.pos\n\t\t\tif c=='cartesian':\n\t\t\t\tself.pSpots.append(pos)\n\t\t\t\tself.pos=pos\n\t\t\t\tself.posCyl=self.m.getCylindrical(pos)\n\t\t\telif c=='cylindrical':\n\t\t\t\tself.posCyl=pos\n\t\t\t\tself.pos=self.m.getCartesian(pos)\n\t\t\t\tself.pSpots.append(self.pos)\n\t\t\telse:\n\t\t\t\traise Exception(\"ERROR: setPos only accepts cartesian and cylindrical coordinates %s\"%c)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\t#get angles in radians:\n\t\t\t\tif self.mountPoint=='left':\n\t\t\t\t\t[betaPr, alphaPr, thPr, r1Pr, r2Pr]=self.m.getAngles(self.pos, self.otherDevice.pos, optimize=True)\n\t\t\t\t\t[beta, alpha, th, r1,r2]=self.m.getAngles(self.lastPos, self.otherDevice.pos, optimize=True)\n\t\t\t\telse:\n\t\t\t\t\t[alpha, beta, th, r1, r2]=self.m.getAngles(self.otherDevice.pos,self.lastPos, optimize=True)\n\t\t\t\t\t[alphaPr, betaPr, thPr, r1Pr, r2Pr]=self.m.getAngles(self.otherDevice.pos,self.pos, optimize=True)\n\t\t\t\tself.m.leftAngleMoni.observe(betaPr*360./(2*pi), self.sim.now())\n\t\t\t\tself.m.rightAngleMoni.observe(alphaPr*360./(2*pi), self.sim.now())\n\t\t\t\ttraveltime+=self.m.timeConstants['maincrane']+abs(thPr-th)/float(self.m.velocities['angMachine'])\n\t\t\t\t#so, maincrane has moved.. time for the smaller cranes. 
Move them one by one.\n\t\t\t\tfor arg in [(abs(alpha-alphaPr), abs(r1-r1Pr)), (abs(beta-betaPr), abs(r2-r2Pr))]:\n\t\t\t\t\ttime=self.m.timeConstants['subcrane']+max(arg[0]/float(self.m.velocities['angCranes']), arg[1]/float(self.m.velocities['radial']))\n\t\t\t\t\ttraveltime+=time\n\t\t\telse: #1a\n\t\t\t\toldCyl=self.m.getCylindrical(self.lastPos)\n\t\t\t\tdTh=abs(oldCyl[1]-self.posCyl[1])\n\t\t\t\tdr=abs(oldCyl[0]-self.posCyl[0])\n\t\t\t\ttraveltime+=self.m.timeConstants['maincrane']+max(dTh/self.m.velocities['angMachine'], dr/self.m.velocities['radial'])\n\t\t\tself.lastPos=self.pos #the way it should be..\n\t\t\tself.moveEvent.signal() #tell the other head that a movement has occured.\n\t\tself.timeConsumption['crane movement']+=traveltime\n\t\treturn traveltime", "def run(self):\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen or not ball.fromTopCamera:\n return\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n sum(PursueBall.ball_distances[:10]) / 10,\n slope))\n print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)", "def update_remote_system_command(client: Client, args: Dict[str, Any]) -> str:\n parsed_args = UpdateRemoteSystemArgs(args)\n delta = parsed_args.delta\n remote_incident_id = parsed_args.remote_incident_id\n\n try:\n if parsed_args.incident_changed:\n incident_status = parsed_args.inc_status\n demisto.debug(f'Performing update_remote_system command with incident id {remote_incident_id}, incident status'\n f' {incident_status} and delta {delta}.')\n update_remote_alert(client, delta, incident_status, remote_incident_id)\n\n else:\n demisto.debug(f\"Skipping the update of remote incident {remote_incident_id} as it has not changed.\")\n\n except Exception as e:\n demisto.error(f'Error in Prisma Cloud V2 outgoing mirror for incident {remote_incident_id}. 
'\n f'Error message: {str(e)}')\n\n return remote_incident_id", "def send_roi_coords(self):\n new_roi_coords = [group.get_roi_coords() for group in self.roi_groups]\n self.signal_status_message.emit('Updated ROI coords.: {}'.format(new_roi_coords))\n self.signal_roi_coords.emit(new_roi_coords)", "def update():", "def update():", "def run(self):\n rospy.Subscriber('qualisys/cascar/odom', Odometry, self.qualisys_callback, queue_size=1)\n rospy.Subscriber('sensor/imu', CarSensor, self.imu_callback, queue_size=1)\n rospy.Subscriber('sensor/cascar', CarSensor, self.cascar_callback, queue_size=1)\n rate = rospy.Rate(self.fs)\n while not self.pose_initialized:\n # If we're using qualisys, wait for the first measurement to use as initial pose.\n rate.sleep()\n \n while not rospy.is_shutdown():\n # Collect pose data and publish continuously.\n pose = (self.X[0],\n self.X[1],\n self.X[2],\n self.v_in*sym.cos(self.X[2]),\n self.v_in*sym.sin(self.X[2]),\n self.w_in)\n\n current_time = rospy.Time.now()\n self.broadcast_odometry_message(pose, current_time)\n \n self.time_update([self.v_in, self.w_in])\n self.v_in = 0\n\n rate.sleep()", "def moving(self,newX,newY):\n LOGGER.debug(\"{} moved to {} | {}\".format(self.physic_id,newX,newY))\n lazzyUpdate().sendTrame(self.physic_id,{\"coordX\":newX,\"coordY\":newY})", "def test_get_actPos(self):\n positions = []\n for app_num, servo_type in app_nr.items():\n self.cmd_num += 1\n # Retrieve the positions directly from the server (without ACS)\n command = headers[0] + commands[4] + ':%d=' %self.cmd_num + str(app_num) + closers[0]\n\n found = False\n while(not found):\n self.sockobj.sendall(command)\n data = \"\"\n while(True):\n data += self.sockobj.recv(1)\n if closers[0] in data:\n if ':%d=' %self.cmd_num in data:\n found = True\n break\n else:\n data = \"\"\n\n if data.startswith(\"!NAK\"):\n continue\n pos_obj = self.__dict__[servo_type]._get_actPos()\n acs_positions, completion = pos_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in getActPos...\"\n continue\n self.assertEqual(len(acs_positions), number_of_axis[servo_type])\n try:\n # Retrieve the message header\n sent, answer = data.split(\">\")\n positions_list = answer.split(',')[1:]\n positions = [float(position.strip()) for position in positions_list]\n except:\n continue\n\n # If the servo is a SRP, you have to covert the real positions to virtual positions\n if servo_type == \"SRP\":\n r2v_cmd = \"../bin/real2virtual\"\n for position in positions:\n r2v_cmd += \" %s\" %position\n put, out = os.popen4(r2v_cmd)\n \n positions = []\n try:\n for line in out.readlines():\n if line.startswith('x'):\n positions.append(float(line.split(\"=\")[1].strip()))\n except:\n positions = [0] * number_of_axis[servo_type]\n\n for idx in range(number_of_axis[servo_type]):\n self.assertAlmostEqual(acs_positions[idx], positions[idx], places=1)", "def set_control_commands(self, ref_state, ref_ind):\n super(DummyVehicle, self).set_control_commands(ref_state, ref_ind)\n safety_distance = 20.\n full_stop_distance = 15.\n\n\n self.check_if_overtake_is_finished()\n\n # Only continue from this point if there are some radar sensings\n if not numpy.any(self.radar_readings[0, :]):\n return\n\n\n min_dist = numpy.min(self.radar_readings[0, :])\n # Set speed.\n if min_dist < full_stop_distance:\n desired_speed = 0.\n self.overtake_begin_counter = 0\n\n elif min_dist < safety_distance:\n desired_speed = self.cruising_speed * min_dist / safety_distance\n else:\n desired_speed = self.cruising_speed\n\n # Every 
subclass can\n if not self.overtake:\n if self.check_if_overtake(min_dist):\n if self.check_if_safe_to_overtake():\n rospy.logwarn(str(self.vehicle_id) + ' start overtaking')\n self.overtake = True\n\n self.commands['speed'] = desired_speed", "async def after_tick(self):\n\n msg = Message(\n to=self.agent.factory_jid,\n body=self.agent.position.to_json()\n )\n msg.set_metadata(\"performative\", \"inform\")\n await self.send(msg)", "def sendMouvementStep(positions):\n\tprogMode(True) # Active le couple des servos\n\tfor servo in positions: # Pour chaque servo\n\t\t# Ecrit la position dans le registre de chaque servo\n\t\taxDriver.setPosition(servo, positions[servo])\n\taxDriver.action(axDriver.BROADCASTID) # Tous les servos bougent", "def print_position(self):\n\n while True:\n lat = self.vehicle.location.global_relative_frame.lat\n lon = self.vehicle.location.global_relative_frame.lon\n try:\n self.pathFile.write('{0},{1}'.format(lat, lon))\n time.sleep(1)\n except:\n break\n #print ('{0},{1}'.format(lat, lon))", "def __init__(self):\n self.current_state_g = State()\n self.current_pose_g = Odometry()\n self.correction_vector_g = Pose()\n self.local_offset_pose_g = Point()\n self.waypoint_g = PoseStamped()\n\n self.current_heading_g = 0.0\n self.local_offset_g = 0.0\n self.correction_heading_g = 0.0\n self.local_desired_heading_g = 0.0\n\n self.ns = rospy.get_namespace()\n if self.ns == \"/\":\n rospy.loginfo(CBLUE2 + \"Using default namespace\" + CEND)\n else:\n rospy.loginfo(CBLUE2 + \"Using {} namespace\".format(self.ns) + CEND)\n\n self.local_pos_pub = rospy.Publisher(\n name=\"{}mavros/setpoint_position/local\".format(self.ns),\n data_class=PoseStamped,\n queue_size=10,\n )\n\n self.currentPos = rospy.Subscriber(\n name=\"{}mavros/global_position/local\".format(self.ns),\n data_class=Odometry,\n queue_size=10,\n callback=self.pose_cb,\n )\n\n self.state_sub = rospy.Subscriber(\n name=\"{}mavros/state\".format(self.ns),\n data_class=State,\n queue_size=10,\n callback=self.state_cb,\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/arming\".format(self.ns))\n\n self.arming_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/arming\".format(self.ns), service_class=CommandBool\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/land\".format(self.ns))\n\n self.land_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/land\".format(self.ns), service_class=CommandTOL\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/takeoff\".format(self.ns))\n\n self.takeoff_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/takeoff\".format(self.ns), service_class=CommandTOL\n )\n\n rospy.wait_for_service(\"{}mavros/set_mode\".format(self.ns))\n\n self.set_mode_client = rospy.ServiceProxy(\n name=\"{}mavros/set_mode\".format(self.ns), service_class=SetMode\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/command\".format(self.ns))\n\n self.command_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/command\".format(self.ns), service_class=CommandLong\n )\n rospy.loginfo(CBOLD + CGREEN2 + \"Initialization Complete.\" + CEND)", "def update(self):\n self._state = None\n self._attributes = {}\n\n vehicles = self._api.get_vehicles(self._latitude, self._longitude)\n scooter = {}\n\n if vehicles:\n for vehicle in vehicles:\n location_vehicle = (vehicle[\"location\"][0], vehicle[\"location\"][1])\n location_hass = (self._latitude, self._longitude)\n vehicle[\"distance\"] = distance(location_vehicle, location_hass).m\n\n scooter = sorted(vehicles, key=lambda item: item[\"distance\"])[0]\n\n if scooter:\n self._state = 
round(scooter[\"distance\"])\n self._attributes[ATTR_LATITUDE] = round(scooter[\"location\"][0], 5)\n self._attributes[ATTR_LONGITUDE] = round(scooter[\"location\"][1], 5)\n self._attributes[ATTR_BATTERY_LEVEL] = round(scooter[\"battery\"])\n self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION", "def tick(self):\n self.world.tick()\n # self.world_snapshot = self.world_queue.get(timeout=60.0)\n # Update data structures for the current frame.\n current_actors = set(\n [vehicle.id for vehicle in self.world.get_actors().filter('vehicle.*')])\n self.spawned_actors = current_actors.difference(self._active_actors)\n self.destroyed_actors = self._active_actors.difference(current_actors)\n self._active_actors = current_actors\n\n world_snapshot = self.world.get_snapshot()\n for v in current_actors:\n line = '{:06},{},{:d}' + ',{:.3f}' * 9\n tf = world_snapshot.find(v).get_transform()\n vehicle = self.world.get_actor(v)\n extent = vehicle.bounding_box.extent\n type_id = vehicle.type_id\n line = line.format(world_snapshot.frame, type_id, v,\n tf.location.x, tf.location.y, tf.location.z,\n tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw,\n extent.x * 2, extent.y * 2, extent.z * 2)\n self.file_writer.writerow(line.split(','))\n\n self.synchronize_sensors()", "def newOdom(msg, args):\n\tglobal x\n\tglobal y\n\tglobal theta\n\n\tglobal goal\n\tglobal path_itr\n\n\tpath_list = args[0]\n\tcmd_vel_pub = args[1]\n\n\tx = msg.pose.pose.position.x\n\ty = msg.pose.pose.position.y\n\n\trot_q = msg.pose.pose.orientation\n\troll, pitch, theta = euler_from_quaternion((rot_q.x, rot_q.y, rot_q.z, rot_q.w))\n\n\tif utils.euclideanDistance((goal.x, goal.y), (x, y)) < 0.2:\n\t\tgoal.x, goal.y = path_list[path_itr].getXYCoords()\n\t\tpath_itr += 1\n\n\t\tif path_itr >= len(path_list):\n\t\t\tspeed = Twist()\n\t\t\tspeed.linear.x = 0.0\n\t\t\tspeed.angular.z = 0.0\n\t\t\tcmd_vel_pub.publish(speed)\n\t\t\trospy.signal_shutdown(\"Reached the last node of the path_list!!! Yayyy!\")", "def update_world(self):\n pass", "def updateMap(self) :\n\t\tself.dot.setPos( \\\n\t\t (self.avatarNP.getX()/(self.modelSizeX))*0.79+0.4, 0, \\\n\t\t (self.avatarNP.getY()/(self.modelSizeY))*0.79+0.21)\n\t\tfor id in self.remoteMap:\n\t\t\tself.remoteMap[id][3].setPos( \\\n\t\t\t\t(self.remoteMap[id][0].getX() / \\\n\t\t\t\t\tself.modelSizeX)*0.79+0.4, \\\n\t\t\t\t0, (self.remoteMap[id][0].getY() / \\\n\t\t\t\t\tself.modelSizeY)*0.79+0.21)" ]
[ "0.64087474", "0.61778766", "0.60122633", "0.5960585", "0.5868954", "0.58094513", "0.5795152", "0.5743932", "0.56760687", "0.56449115", "0.5635973", "0.5577554", "0.5568342", "0.5534826", "0.5531178", "0.55111843", "0.5509245", "0.5506559", "0.5487788", "0.5474574", "0.5461832", "0.5451264", "0.5440252", "0.54344565", "0.5411915", "0.5397112", "0.53911203", "0.5386832", "0.5386298", "0.53811353", "0.5380822", "0.5379727", "0.5379119", "0.53749174", "0.53652954", "0.53562105", "0.5355404", "0.53530675", "0.5347312", "0.5344904", "0.5328667", "0.5320669", "0.5314855", "0.53109324", "0.53063154", "0.53043747", "0.53038555", "0.52950096", "0.52913934", "0.5291301", "0.5287906", "0.5260506", "0.52577907", "0.5255098", "0.5252329", "0.5251468", "0.5244943", "0.5237088", "0.52370274", "0.5232386", "0.52242595", "0.5223071", "0.52191746", "0.5216335", "0.5214189", "0.5213989", "0.52137643", "0.5211952", "0.5211142", "0.5202814", "0.5190275", "0.51896197", "0.51832914", "0.5179829", "0.5163996", "0.5162962", "0.5160743", "0.51511896", "0.51476043", "0.5143824", "0.51432836", "0.514007", "0.51359206", "0.51251", "0.512284", "0.5122732", "0.5121918", "0.5121918", "0.51202327", "0.51190245", "0.51153857", "0.5114984", "0.5095249", "0.50951695", "0.5091156", "0.5089685", "0.5087807", "0.50839406", "0.5083189", "0.50692314", "0.50688547" ]
0.0
-1
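
Several of the negative snippets in the record above build a planar orientation quaternion straight from a yaw angle (the `sin(theta / 2.0)` / `cos(theta / 2.0)` pattern in the odometry broadcaster). A minimal standalone sketch of that conversion follows; returning a plain tuple instead of a ROS `Quaternion` message is an assumption made only to keep the sketch self-contained.

```python
from math import cos, pi, sin


def yaw_to_quaternion(theta: float) -> tuple:
    """Planar (yaw-only) rotation expressed as an (x, y, z, w) quaternion,
    following the sin(theta/2) / cos(theta/2) pattern used in the odometry
    snippet above; the plain-tuple return type is an assumption."""
    return (0.0, 0.0, sin(theta / 2.0), cos(theta / 2.0))


# Example: a 90-degree left turn about the vertical axis.
print(yaw_to_quaternion(pi / 2.0))  # (0.0, 0.0, 0.707..., 0.707...)
```
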
A simple time-varying current field in the x direction based on a sinusoid.
def sinusoid_current(self): t = self.get_time() # time in seconds offset_time = t - self.start_time - self.delay_time # time offset by start/delay time # Don't start the currents until t > start + 10 if t < self.start_time + self.delay_time: print("Waiting for delay time to pass: {}/{}".format(t, self.start_time + self.delay_time)) velocity = 0 else: velocity = self.max_vel * sin(self.w*offset_time) # current velocity in m/s vertical_angle = 0.0 # vertical angle in radians # current velocity must be positive, so change horizontal angle # such that if velocity < 0, push vehicle to left, else right horizontal_angle = pi if velocity < 0 else 0 speed = abs(velocity) return speed, horizontal_angle, vertical_angle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, x):\r\n f = math.sin(self.phase_offset + self.angular_frequency*x)\r\n return self.amplitude_offset + self.amplitude*f", "def tsx(self):\n\n self.x = self.sp\n self.set_zn(self.x)", "def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)", "def update(self, dt):\n sin_noise = self.amplitude*np.sin (3.14/self.char_time*self.T)\n self._x = np.zeros((2,8))\n self._x[0,4:]= sin_noise\n self.T+=dt #timestep", "def position1(t):\n return 100.0 * exp(-t * 1.0 / 10.0) * sin(2 * pi * t)", "def xsin(x):\n return x + tf.sin(x)", "def temp_serial_placeholder():\n return [sin(current_time_milli()/1000), cos(current_time_milli()/1000)]", "def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)", "def observation(self, x):\n\t\tx = self.normalise(x)\n\t\tx_STMS, x_STPS = self.polar_analysis(x)\n\t\tx_STMS_STPS = tf.concat([x_STMS, x_STPS], axis=-1)\n\t\treturn x_STMS_STPS, x_STMS_STPS", "def sin(x):\n raise NotImplementedError", "def set_global_coordinates(self):\r\n\r\n # alias:\r\n F = Turbine.F\r\n t = Turbine.t\r\n \r\n self.x = -F*np.sin(t) + self.x0\r\n self.y = +F*np.cos(t) + self.y0", "def position(x,v,t,a):\n return x + v*t + 0.5*a*t**2 # pos = initial position + velocity*time + .5(acceleration)(time squared)", "def sinc(x):\n y = pi* where(x == 0, 1.0e-20, x)\n return sin(y)/y", "def tick(self):", "def forward(self, x):\r\n self.x = (self.x+(x*(math.cos(self.dir))))\r\n self.y = (self.y+(x*(math.sin(self.dir))))\r\n return (self.x, self.y)", "def psi(x):\n return np.sin(x)", "def fiber_gets_stimulated(fiber_no, frequency, current_time):\n\n # determine motor unit\n alpha = 1.0 # 0.8\n mu_no = 0\n \n # determine if fiber fires now\n index = int(np.round(current_time * frequency))\n n_firing_times = np.size(firing_times,0)\n \n #if firing_times[index % n_firing_times, mu_no] == 1:\n #print(\"fiber {} is mu {}, t = {}, row: {}, stimulated: {} {}\".format(fiber_no, mu_no, current_time, (index % n_firing_times), firing_times[index % n_firing_times, mu_no], \"true\" if firing_times[index % n_firing_times, mu_no] == 1 else \"false\"))\n print(\"fiber {} is mu {}, t = {}, row: {}, stimulated: {} {}\".format(fiber_no, mu_no, current_time, (index % n_firing_times), firing_times[index % n_firing_times, mu_no], \"true\" if firing_times[index % n_firing_times, mu_no] == 1 else \"false\"))\n \n return firing_times[index % n_firing_times, mu_no] == 1", "def sinwave(scene):\n # create an empty homogeneous transformation\n matrix = np.eye(4)\n # set Y as cos of time\n matrix[1][3] = np.cos(time.time()) * 2\n # set Z as sin of time\n matrix[2][3] = np.sin(time.time()) * 3\n\n # take one of the two spheres arbitrarily\n node = s.graph.nodes_geometry[0]\n # apply the transform to the node\n scene.graph.update(node, matrix=matrix)", "def michalewicz(x):\n return x * sin(10 * pi * x) + 1.0", "def InSineFunction_getValueAt(*args):\n return _osgAnimation.InSineFunction_getValueAt(*args)", "def advancePosition(self,time):\n velocity = self.getVelocity()\n return self.x + time*velocity", "def sin(x):\n return 0.0", "def observation(self, x):\n\t\tx = self.normalise(x)\n\t\tx_STMS, x_STPS = self.polar_analysis(x)\n\t\treturn x_STMS, x_STPS", "def position2(t):\n return 98.0 * exp(-t * 2.0 / 13.0) * sin(2 * pi * t)", "def f(x):\n return (2.0*math.sin(10.0*x+1.0)+1.0)", "def wavefunction(self, x):\n return ( float(1) / math.pi**(float(1)/4)) * math.exp( x**2 / float(-2))", "def fsim( self, x , t = 0 ):\n \n u = self.t2u( t )\n dx = 
self.f( x, u, t)\n \n return dx", "def pos(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a0 + self.a1 * t + self.a2 * pow(t, 2) + self.a3 * pow(t, 3) + self.a4 * pow(t, 4) + self.a5 * pow(t, 5)", "def sine(x, period, var):\r\n return var*np.sin((2*np.pi/period)*x)", "def t0(self):\n return self._time_axis.start", "def get_frame_x(self, i: int) -> Tuple[int]:\n return (0, self.pendulum1.x[i], self.pendulum2.x[i])", "def x(self):\r\n return self.position.x", "def vi1(t):\n u_t = 1*(t>0)\n return (np.sin(2000*np.pi*t)+np.cos(2e6*np.pi*t)) * u_t", "def g_0(thetas,aa,time):\n\treturn 2.0*(1.0/sin(thetas)) * ((1. +3.*aa)*(1. + 3.*aa)*time*time*time*time + 2.*(1. + 2.0*aa + 2.0*aa + 3.0*aa*aa) *time*time + 2.0 * (1.0+aa) * (1.0 + aa))", "def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))", "def _compute_e_S1S2(self, x):\r\n psi = x[PSI_IDX]\r\n return np.array([-np.sin(psi), np.cos(psi)])", "def shoot(self):\n e = self.energy()\n y = self.rap()\n sqrt_pt2_m2 = e / math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n m = self.mass()\n pt = math.sqrt( sqrt_pt2_m2**2 - m**2 )\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])", "def InOutSineFunction_getValueAt(*args):\n return _osgAnimation.InOutSineFunction_getValueAt(*args)", "def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])", "def x(self):\n return self.coords[0]", "def pyts_time_series(x):\n p = circle(x, r=0.2, a=0, b=0, x_lim=(-0.2, 0.2))\n y = - circle(x, r=0.2, a=0.4, b=0, x_lim=(0.2, 0.6))\n t = 0\n s = circle(x, r=0.125, a=0.9, b=-0.025, x_lim=(0.775, 0.9))\n s += 0.1 * (x >= 0.9) * (x <= 1.2)\n return p + y + t + s", "def x ( self ) :\n return self.xvar", "def f(x):\r\n\treturn np.sin(x)", "def step(self, sys):\n self._position_update(sys, 0.5*self.dt)\n self._momentum_update(sys, self.dt)\n self._position_update(sys, 0.5*self.dt)", "def x(self):\n return self.x", "def equation_of_time(cls, date):\n offset = cls.sine(cls.mean_position(date, cls.ANOMALISTIC_YEAR))\n equation_sun = (offset * angle(57, 18, 0) * (14/360 - (abs(offset) / 1080)))\n return ((cls.daily_motion(date) / 360) * (equation_sun / 360) * cls.SIDEREAL_YEAR)", "def tick():\n global current\n current += 1", "def tick():\n global current\n current += 1", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def TSX(self, *_):\n self.reg.X = self.reg.S\n self.reg.N = self.reg.X << 7\n self.reg.Z = self.reg.X == 0", "def f4(x):\n return sin(x)/x", "def step(self, sys):\n self._momentum_update(sys, 0.5*self.dt)\n self._position_update(sys, 0.5*self.dt)\n self._OU_update(sys, self.dt)\n self._position_update(sys, 0.5*self.dt)\n self._momentum_update(sys, 0.5*self.dt)", "def f(x):\n\treturn np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0)", "def target_state(self, s):\n # YOUR CODE HERE\n if s > self.total_length:\n s = self.total_length\n\n theta = s/self.total_length*self.angle\n \n x = np.sin(theta)*self.radius\n if self.left_turn:\n y = (1 - np.cos(theta))*self.radius\n 
else:\n y = -(1 - np.cos(theta))*self.radius\n\n if not self.left_turn:\n theta = -theta\n\n theta = (theta+np.pi)%(np.pi*2) - np.pi\n # theta = theta%(np.pi*2)\n return np.array([x, y, theta])\n # return np.append(np.dot(self.g, np.array([x, y, 1]))[:2], theta + self.start_theta)", "def shoot(self):\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n theta = self.theta()\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def theta_v_time():\n pass", "def sine(x, period=10.0, start_phase=0.0, name=''):\n x = x.astype(np.float)\n variables = {\n 'function': sine, 'period': period, 'start_phase': start_phase}\n y = np.sin(2*np.pi*(x / period) + start_phase)\n return packer(x, y, variables, name=name)", "def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ################################################\n # Place holder: put the equations of motion here\n raise NotImplementedError\n ################################################\n \n return dx", "def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def getValueAt(*args):\n return _osgAnimation.InSineFunction_getValueAt(*args)", "def x(self) -> float:\n return self.data[0]", "def note(freq):\n data = np.sin(2.0 * np.pi * freq * t) * amp\n return data", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def snakeh2(x):\n return x + (2 * tf.sin(0.5 * x) * tf.sin(0.5 * x))", "def sin(x):\r\n # see decorator for function body\r", "def getValueAt(*args):\n return _osgAnimation.InOutSineFunction_getValueAt(*args)", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._turtle.xcor()", "def fx(self, x):\n A = np.eye(3) + self.dt * np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n xp = A @ x\n return xp", "def f(x):\r\n return x * np.sin(x)", "def waypoints(t):\n global x\n xx = x + ((2 * PI)/t)\n yy = 2*(math.sin(xx))*(math.sin(xx/2))\n return [xx, yy]", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def lastTick():", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 
20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def tick(self, dt):\n self.x += dt * self.x_speed\n self.y += dt * self.y_speed", "def addField(self,time,direction='x'):\n\n shape = self.pulse(time) \n\n if direction.lower() == 'x':\n self.mol.F += self.field*shape*(self.mol.M[0])\n elif direction.lower() == 'y':\n self.mol.F += self.field*shape*(self.mol.M[1])\n elif direction.lower() == 'z':\n self.mol.F += self.field*shape*(self.mol.M[2])\n self.mol.orthoFock()", "def OutSineFunction_getValueAt(*args):\n return _osgAnimation.OutSineFunction_getValueAt(*args)", "def desplazamientox(tiempo,velocidad):\r\n #se realiza un operacion para encontrar el el desplzamiento horizaontal\r\n x=tiempo*velocidad\r\n #se regresa el valor de x\r\n return x", "def __get_x__(self):\n return self.Direction['x']", 
"def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def _sind(v):\n return math.sin(math.radians(v))", "def shoot(self):\n pt = self.pt()\n assert pt >= 0\n m = self.mass()\n assert m >= 0\n sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 )\n y = self.rap()\n e = sqrt_pt2_m2 * math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def get_speed_x(self):\r\n return self.__X_speed", "def tick(self):\n pass", "def tick(self):\n pass", "def posture_sensor(axis):\n\n\treturn 0.0", "def NonLinPendulum(y, t, a, q, F, drivAngFreq):\n \n angle = y[0]\n omega = y[1]\n return [omega, -a * np.sin(angle) - q*omega + F*np.sin(drivAngFreq * t)]", "def tick(self, tick):\n pass", "def tick(self, tick):\n pass", "def step(self, dt):\n return Vector(self.P.x + dt*self.V.x, self.P.y + dt*self.V.y)", "def sinh(x):\n raise NotImplementedError", "def sfunc(self,x,y):\n return np.exp(-(x-self.x_0)**2.0-(y-self.y_0)**2.0)", "def set_state(self, time, x):\n\n self.x = np.copy(x)\n self.t = np.copy(float(time))", "def Output(self,howmanytimes = 100000):\n #self.T = self.GetMatrixT()\n #self.inputVector = self.CalculateFirstInputVector()\n self.howmanytimes = howmanytimes\n for i in range(self.howmanytimes):\n self.phiNew = self.T[0]*self.inputVector[0] + self.T[1]*self.inputVector[1]\n self.thetaNew = self.T[2]*self.inputVector[0] + self.T[3]*self.inputVector[1]\n self.Store(self.thetaNew)\n self.outputVector = [self.phiNew, self.thetaNew]\n self.inputVector = self.outputVector\n self.timeAxis = [self.dt*i for i in range(self.howmanytimes +1)]\n return self.thetaAxis, self.timeAxis", "def shoot(self):\n theta = self.theta()\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def _sincfunc(x, dx, dampfac=3.25):\n if dx != 0.0:\n xx = (x+dx)*np.pi #- cache shifted array for 30% faster evals\n return np.exp( -(xx/(dampfac*np.pi))**2 ) * np.sin(xx) / xx\n else:\n xx = np.zeros(len(x))\n xx[len(x)//2] = 1.0\n return xx", "def get_current(self):\n return self.x", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])" ]
[ "0.6198477", "0.6151556", "0.6098946", "0.5898149", "0.5895894", "0.5881547", "0.5811019", "0.57994246", "0.57935405", "0.5681556", "0.5660104", "0.5616797", "0.5612879", "0.56064534", "0.55949235", "0.5590392", "0.5557532", "0.5556586", "0.55441815", "0.5528075", "0.552043", "0.55184263", "0.5489093", "0.5477844", "0.54350954", "0.5430619", "0.54215884", "0.54115325", "0.5397983", "0.53978276", "0.53918797", "0.5379718", "0.5360469", "0.53590107", "0.5342911", "0.53376776", "0.53350365", "0.53263575", "0.53229403", "0.52987355", "0.52983326", "0.52960885", "0.5289926", "0.528762", "0.5282358", "0.5277124", "0.52716905", "0.52689826", "0.52689826", "0.5262048", "0.5256599", "0.52553177", "0.52500683", "0.52442396", "0.5237858", "0.5237103", "0.52363014", "0.5227635", "0.5221394", "0.52162385", "0.5208782", "0.5208775", "0.52061033", "0.520453", "0.5198237", "0.5197546", "0.51938057", "0.5191106", "0.51826614", "0.51826614", "0.51773334", "0.51753426", "0.5170416", "0.5170331", "0.5166246", "0.51644725", "0.5163739", "0.5161811", "0.51472056", "0.51455325", "0.5136964", "0.51348084", "0.51277924", "0.51254195", "0.5121975", "0.5121513", "0.5121513", "0.5117715", "0.51173025", "0.51170945", "0.51170945", "0.5114908", "0.511443", "0.51133144", "0.5111423", "0.51106447", "0.5107926", "0.5106721", "0.51039577", "0.50952464" ]
0.627386
0
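
The record above pairs the sinusoidal-current query with an implementation that keeps the reported speed non-negative and flips the horizontal angle to pi whenever the signed velocity points in -x. A minimal standalone sketch of that idea is below; the constants `max_vel` and `w` and the omission of the start/delay gating shown in the record are assumptions made to keep the sketch self-contained.

```python
from math import pi, sin


def sinusoid_current(t: float, max_vel: float = 0.5, w: float = 0.1) -> tuple:
    """Return (speed, horizontal_angle, vertical_angle) for a time-varying
    current along x: speed stays non-negative, and the horizontal angle
    flips to pi when the signed velocity is negative."""
    velocity = max_vel * sin(w * t)
    horizontal_angle = pi if velocity < 0 else 0.0
    return abs(velocity), horizontal_angle, 0.0


# Example: a quarter period after t = 0 the current pushes fastest in +x.
speed, h_angle, v_angle = sinusoid_current((pi / 2.0) / 0.1)
print(round(speed, 3), h_angle, v_angle)  # 0.5 0.0 0.0
```
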
Generate prime values using the sieve of Eratosthenes method.
def create_primes(threshold): if threshold == 2: return [2] elif threshold < 2: return [] numbers = list(range(3, threshold + 1, 2)) root_of_threshold = threshold**0.5 half = int((threshold + 1) / 2 - 1) idx = 0 counter = 3 while counter <= root_of_threshold: if numbers[idx]: idy = int((counter * counter - 3) / 2) numbers[idy] = 0 while idy < half: numbers[idy] = 0 idy += counter idx += 1 counter = 2 * idx + 3 return [2] + [number for number in numbers if number]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False", "def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)", "def sieve(upto):\n return list(prime_numbers(upto))", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def eratosthenes(x):\n multiples = []\n for i in range(2, x+1):\n if i not in multiples:\n print (i)\n for j in range(i*i, x+1, i):\n multiples.append(j)", "def sieve_of_eratosthenes(n: int) -> List[int]:\n\n prime = [True for i in range(n+1)] #initiate array named prime with all value True, ie everynumber [0,n] are prime\n p = 2\n while (p * p <= n):\n # If prime[p] is not\n # changed, then it is a prime\n if (prime[p] == True): #if any number is prime then its multiple must be composite\n # Update all multiples of p to be not prime \n for i in range(p * p, n+1, p):\n prime[i] = False\n p += 1\n\n\n '''\n till here the status of code is:\n 0:prime\n 1:prime\n 2:prime\n 3:prime\n 5:prime\n 7:prime\n 11:prime\n .\n .\n .\n\n But 0 and 1 are not prime, so we will have to count numbers from 2\n '''\n\n return [i for i, p in enumerate(prime[2:], 2) if p]", "def sieve(n: int) -> Generator[int, None, None]:\n primes, p = [i for i in range(2, n + 1)], 2\n while p**2 < n:\n for i in primes:\n if i % p == 0 and i != p:\n primes.remove(i)\n p += 1\n yield from primes", "def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res", "def generatePrimesSieve(count):\n\tif count < 1:\n\t\treturn None\n\n\tsieve = itertools.count(3, 2)\n\tlastPrime = 2\n\tfor i in xrange(1, count):\n\t\tlastPrime = sieve.next()\n\t\tprint lastPrime\n\t\tsieve = filterPrime(sieve, lastPrime)\n\treturn lastPrime", "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return 
result", "def sieve(n):\n\tif n < 2:\n\t\treturn []\n\telse:\n\t\tis_prime = [True] * n\n\t\tis_prime[0] = is_prime[1] = False\n\t\tfor i in range(2, n):\n\t\t\tif is_prime[i]:\n\t\t\t\tyield i\n\t\t\t\tfor num in range(i*i, n, i):\n\t\t\t\t\tis_prime[num] = False", "def eratosthenes2(n):\n multiples = set()\n for i in range(2, n+1):\n if i not in multiples:\n yield i\n multiples.update(range(i*i, n+1, i))", "def prime_sieve(n):\n li = [True] * n\n li[0] = li[1] = False\n\n for (i, isprime) in enumerate(li):\n if isprime:\n yield i\n for j in range(i*i, n, i):\n li[j] = False\n return(li)", "def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)", "def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes", "def sieve(upper=10**5):\n nums = [True] * (upper + 1)\n nums[0] = False\n nums[1] = False\n for i in range(2, upper + 1):\n if not nums[i]: continue\n for j in range(i * 2, upper + 1, i):\n nums[j] = False\n return nums", "def seive_of_eratosthenes(n):\n sieve = [ True for i in range(n+1) ]\n def markOff(pv):\n for i in range(pv+pv, n+1, pv):\n sieve[i] = False\n markOff(2)\n for i in range(3, n+1):\n if sieve[i]:\n markOff(i)\n return [ i for i in range(2, n+1) if sieve[i] ]", "def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)", "def gen_eratosthenes():\n n=3\n yield 2\n while True:\n count = 2 #set count to 2 because if count=1; all numbers are divisible by 1, so it is not a case we need to check\n this = True\n while count < n/2 + 1: #set to n/2 + 1 so that the amount of times iterated is minimized.\n if n%count == 0: #i.e. if n is divisble by count, then n is not prime\n count = n #ends this loop; if n is not prime, there is no reason to continue the loop\n this = False\n count += 1\n if this == True: #i.e. if this == True, then we know that the while loop was completely executed and n has no divisors except 1 and n\n yield n #yield n since it went through the entire loop without finding divisors\n n += 1 #increment n to see if n+1 is prime. 
will continue incrimenting until another prime is found and yields it", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next", "def er_sieve(s):\n sis = [True] * (s + 1)\n\n sis[0] = False\n sis[1] = False\n p = 2\n\n while (p ** 2) <= s:\n if sis[p]:\n for i in range(p * 2, s + 1, p):\n sis[i] = False\n p += 1\n\n primes = []\n\n for j in range(2, s):\n if sis[j]:\n primes.append(j)\n\n return primes", "def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def sieve(n):\n\n results = [1 for _ in range(n+1)]\n results[0], results[1] = 0, 0\n results = [0,0] + [1]*(n - 1)\n len(results)\n div = 2\n\n for i,num in enumerate(results):\n if num:\n k = i * 2\n while k <= n:\n seive[k] = 0\n k+= i\n return [x for (x,y) in enumerate(results) if y]\n\n while div <= n // 2 + 1:\n for i in range(div * div, n+1, div):\n if results[i] == 0:\n continue\n else:\n results[i] = 0\n div += 1\n\n #return sum(results)\n return [i for i in range(len(results)) if results[i] == 1]", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def sieveOfEra():\n D = {}\n yield 2\n for q in itertools.islice(itertools.count(3), 0, None, 2):\n p = D.pop(q, None)\n if p is None:\n D[q*q] = q\n yield q\n else:\n x = p + q\n while x in D or not (x&1):\n x += p \n D[x] = p", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def sieve(n):\n #All even numbers except 2 are not primes\n primes = [False, False, True] + [True, False] * (n / 2)\n\n #Start with 3\n p = 3\n\n while p*p <= n:\n if primes[p]:\n #p is prime, cross off all multiples of p, starting at the square \n #of p since all smaller multiples have already been crossed off\n d = p*p\n while d <= n:\n primes[d] = False\n d += p\n p += 2\n\n #Build a list of the primes we've found\n return [i for i in range(n) if primes[i]]", "def eratosthenes(n):\n assert n>1 #asserting n be a positive integer\n prime_list = []\n for i in range(2,n+1): #fills prime_list with all integers 2 <= i <= n\n prime_list.append(i)\n multiple = 2 #set to 2 because if set to 1 it will remove all elements from the list\n while multiple <= n/multiple:\n count = 2 #set to 2 because if set to 1 it will remove 
the prime itself from the list\n while count <= n/multiple:\n if count*multiple in prime_list: #checks if count*multiple is in list. needed because it could have already been removed\n prime_list.remove(count*multiple) #removes count*multiple\n count = count + 1\n multiple = multiple + 1\n #print(prime_list) #for testing only\n return prime_list", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def primeSieve(n):\n\tsieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n\tfor i in xrange(1,int(n**0.5)/3+1):\n\t\tif sieve[i]:\n\t\t\tk=3*i+1|1\n\t\t\tsieve[ k*k/3 ::2*k] = False\n\t\t\tsieve[k*(k-2*(i&1)+4)/3::2*k] = False\n\treturn numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1", "def sieve(n):\n if n < 2:\n return []\n s = [True] * (n + 1)\n s[0], s[1] = False, False\n sq = int(n ** 0.5)\n for i in range(2, sq + 1):\n if s[i]:\n m = n // i - i\n s[i * i : n + 1 : i] = [False] * (m + 1)\n return [i for i in range(n + 1) if s[i]]", "def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes", "def sieve(max):\n\tprimes = [False]*max\n\tfor i in range(2, int(math.sqrt(len(primes)))):\n\t\tif primes[i] == False:\n\t\t\tfor j in range(i*i, max, i):\n\t\t\t\tprimes[j] = True\n\tcount = 0\n\tprint(\"Prime numbers under \", max, \":\", sep='')\n\tfor j in range(2, max):\n\t\tif primes[j] == False:\n\t\t\tcount += 1\n\t\t\tif count % 20 == 0:\n\t\t\t\tprint(j)\n\t\t\telse:\n\t\t\t\tprint(j, end='\\t')\n\tprint()", "def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. 
Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]", "def get_primes(n):\n\n return list(primes_sieve(n))", "def sieve_of_eratosthenes(n):\n primes = [True] * (n + 1)\n # because p is the smallest prime\n p = 2\n\n while p * p <= n:\n # if p is not marked as False, it is a prime\n if primes[p]:\n # mark all the multiples of number as False\n for i in range(p * 2, n + 1, p):\n primes[i] = False\n p += 1\n\n # getting all primes\n primes = [element for element in range(2, n + 1) if primes[element]]\n\n return primes", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def sieve(n):\n s = [True] * (n + 1)\n for i in range(2, isqrt(n) + 1):\n if s[i]:\n for j in range(i + i, n + 1, i):\n s[j] = False\n return [i for i in range(2, n + 1) if s[i]]", "def generate_primes():\n # David Eppstein, UC Irvine, 28 Feb 2002\n # Source : http://code.activestate.com/recipes/117119/\n yield 2\n\n D = {} # map composite integers to primes witnessing their compositeness\n for q in count(start=3, step=2):\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(2*p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory", "def sieve_for_primes_to(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def eratosthenes_sieve(iterable):\n iterable_set = set(iterable)\n mx = max(iterable)\n lst = [ x for x in range(2, mx)]\n for num in lst:\n tmp = 2\n while tmp < mx:\n iterable_set.discard(num * tmp)\n tmp += 1\n return iterable_set", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes", "def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in 
xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]", "def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1", "def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def sieveOfSundaram(number):\n nNew = int((number -2) / 2)\n \n marked = [0] * number\n primes = []\n \n \n for i in range (1, nNew):\n j = i\n while i + j + (2*i*j) <= nNew:\n marked[i + j + (2*i*j)] = 1\n j += 1\n if number > 2:\n primes.append(2)\n \n for i, value in enumerate(marked):\n if i < nNew:\n if not value:\n primes.append(2*i + 1)\n \n return primes", "def get_primes(s):\n primes = bytearray([1] * s)\n for i in range(2, s):\n if primes[i] == 1:\n for j in range(i, s):\n if i * j < s:\n primes[i * j] = 0\n else:\n break\n return primes", "def genPrimes(n):\n assert n>1\n p = gen_eratosthenes()\n prime_list = []\n prime_list.append(next(p))\n while n > prime_list[len(prime_list)-1]: #while input is less than the last term in the prime list\n prime_list.append(next(p)) #adds next term from generator\n if n < prime_list[len(prime_list)-1]: #deletes last term\n del prime_list[len(prime_list)-1]\n #print(prime_list) #for testing only\n return prime_list", "def generate():\n j = [2]\n i = 3\n while i:\n if is_prime(i):\n j.append(i)\n yield [j, j[-1]]\n i += 2", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def sieve_of_erat(N):\n erat_sieve = [True]*int(N/2)\n prime_list = []\n prime_list.append(2)\n for i in range(int((math.sqrt(N)-3)/2)+1): # Only need to run till sqrt(n)\n if erat_sieve[i] == True:\n j = i + (2*i+3)\n while j < int(N/2):\n erat_sieve[j] = False\n j += (2*i+3)\n for i in range(int(N/2)):\n if erat_sieve[i] == True:\n prime_list.append(2*i+3)\n \n return erat_sieve, prime_list", "def generatePrimesFrom2(n):\n sieve = bytearray([True]) * (n//2+1)\n for i in range(1,int(n**0.5)//2+1):\n if sieve[i]:\n sieve[2*i*(i+1)::2*i+1] = bytearray((n//2-2*i*(i+1))//(2*i+1)+1)\n return [2, *compress(range(3,n,2), sieve[1:])]", "def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)", "def sieve(self, upto_num):\n max_cur_known = self.max_known_number()\n \n num_new = upto_num - 
max_cur_known\n #All new numbers are primes until they are crossed off\n self.number_list.extend(array.array('b', [1])*num_new)\n \n for marker_num in range(2, maths.floor(maths.sqrt(upto_num)) + 1):\n #For efficiency only use prime marked numbers\n if not self.is_prime(marker_num):\n continue\n \n min_x = max(max_cur_known // marker_num + 1, marker_num)\n max_x = upto_num // marker_num\n \n for x in range(min_x, max_x + 1):\n self.number_list[marker_num*x] = 0 # Non-prime", "def sieve_of_erat(N):\n lim = int(N/2)\n if N % 2 == 0:\n lim -= 1\n erat_sieve = [True]*lim\n prime_list = []\n prime_list.append(2)\n for i in range(int((sqrt(N)-3)/2)+1): # Only need to run till sqrt(n)\n if erat_sieve[i] == True:\n j = i + (2*i+3)\n while j < lim:\n erat_sieve[j] = False\n j += (2*i+3)\n for i in range(lim):\n if erat_sieve[i] == True:\n prime_list.append(2*i+3)\n \n return erat_sieve, prime_list", "def sieve(n):\n\n primes = []\n sieve = [0] * n\n\n for i in range(2, n):\n if sieve[i] == 0:\n primes.append(i)\n sieve[i*i:n:i] = [1] * slice_length(i*i, n, i)\n\n return primes", "def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False", "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def sieve(integer):\n \n is_prime = [1]*(integer+1)\n is_prime[0] = is_prime[1] = 0\n i = 2\n while i*i <= integer:\n if is_prime[i]:\n for j in range(i*i, integer+1, i):\n is_prime[j] = 0\n i += 1\n \n prime = [i for i,v in enumerate(is_prime) if v]\n\n return prime", "def phi(n):\n sieve = [i if i & 1 else i // 2 for i in range(n + 1)]\n for i in range(3, n + 1, 2):\n if sieve[i] == i:\n for j in range(i, n + 1, i):\n sieve[j] = (sieve[j] // i) * (i - 1)\n\n return sieve", "def gen_primes():\n D = defaultdict(list)\n q = 2\n while True:\n if q not in D:\n\n yield q \n D[q * q] = [q]\n else:\n for p in D[q]:\n D[p + q].append(p)\n del D[q]\n q += 1", "def Primes():\n candidate = 1\n _primes_so_far = [2] # first prime, only even prime\n yield _primes_so_far[-1]\n while True:\n candidate += 2 # check odds only from now on\n for prev in _primes_so_far:\n if prev**2 > candidate:\n yield candidate\n _primes_so_far.append(candidate)\n break\n if not divmod(candidate, prev)[1]: # no remainder!\n break # done looping", "def primes(n):\n sqrtN=n**0.5\n odds=[2]\n odds+=[i for i in range(3,n) if 
i%2>0]\n\n for i in odds:\n if i!=0 and i<=sqrtN:\n for j in odds[odds.index(i)+1:]:\n if j%i==0:\n odds[odds.index(j)]=0\n return [i for i in odds if i!=0]", "def primes(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]", "def main():\n prime = gen_prime(1, 100000)\n print(prime)", "def primes(count):\n\n prime_nums = [2]\n prime = 3\n\n for i in range(1, count):\n\n while prime not in [3, 5, 7] and (\n prime % 3 == 0 or prime % 5 == 0 or prime % 7 == 0\n ):\n prime += 2\n\n prime_nums.append(prime)\n prime += 2\n\n return prime_nums", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def primes(max_number_of_primes) -> iter:\n number_primes = count(1)\n prime = prime_generator()\n while next(number_primes) <= max_number_of_primes:\n yield next(prime)", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sieve8(n):\n prime = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(3, int(n**.5) + 1, 3):\n if prime[i // 3]:\n p = (i + 1) | 1\n prime[ p*p//3 ::2*p] = False\n prime[p*(p-2*(i&1)+4)//3::2*p] = False\n result = (3 * prime.nonzero()[0] + 1) | 1\n result[0] = 3\n return np.r_[2,result]", "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primes(n):\n if n == 0 or n == 1:\n return []\n else:\n p = primes(int(sqrt(n)))\n no_p = { j for i in p for j in xrange(i*2, n+1, i) }\n p = { x for x in xrange(2, n + 1) if x not in no_p }\n return p", "def primesList(n):\n sieve = [True]*n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[2*i::i] = [False]*(len(sieve[2*i::i]))\n return [2]+[i for i in range(3,n,2) if sieve[i]]", "def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1", "def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p", "def prime_generator():\n i = 0 # prime numbers counter\n num = 0 # current number\n while True:\n num += 1\n if is_prime(num):\n i 
+= 1\n yield i, num", "def primes2(n):\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def erato(n):\n sieve = [0, 0, 1] + [1, 0] * (n//2) # [0 0 1 1 0 1 0...]\n\n prime = 3 # initial odd prime\n\n while prime**2 <= n:\n for i in range(prime**2, n+1, prime*2): \n sieve[i] = 0 # step through sieve by prime*2\n\n prime += 1 + sieve[prime+1:].index(1) # get next prime\n\n # filter includes corresponding integers where sieve = 1\n\n return filter(lambda i, sieve=sieve: sieve[i], range(n+1))" ]
[ "0.7907143", "0.7902661", "0.7900705", "0.7870067", "0.786426", "0.7795188", "0.77421093", "0.7724749", "0.76443654", "0.7612922", "0.758561", "0.7580121", "0.75798064", "0.75734884", "0.7526076", "0.74892634", "0.7484998", "0.7456353", "0.74515086", "0.7430674", "0.7415825", "0.74022704", "0.73947823", "0.7345324", "0.73261136", "0.73145556", "0.7311978", "0.73085713", "0.729819", "0.72955006", "0.72851807", "0.72851807", "0.72731465", "0.72644293", "0.7262467", "0.7261658", "0.7261658", "0.725955", "0.725536", "0.7247443", "0.7246793", "0.724314", "0.7230894", "0.7219585", "0.7214854", "0.7210034", "0.72087157", "0.7179185", "0.7175763", "0.71745807", "0.71630967", "0.71615", "0.7117119", "0.70501274", "0.7034713", "0.7023465", "0.70166355", "0.6995457", "0.6979989", "0.69771975", "0.6973202", "0.6970365", "0.69697577", "0.69682896", "0.6964922", "0.6952886", "0.6950099", "0.6931514", "0.69285756", "0.6925086", "0.6914153", "0.69020617", "0.6897912", "0.6887251", "0.68788016", "0.6866126", "0.68477386", "0.68462604", "0.6841565", "0.6834284", "0.6811344", "0.6810563", "0.6801581", "0.67993784", "0.6790409", "0.6788543", "0.67810494", "0.67798996", "0.6762523", "0.67397386", "0.67188597", "0.67167586", "0.67047256", "0.6699455", "0.6697439", "0.6693933", "0.6692184", "0.6690373", "0.66769993", "0.6670832", "0.6669267" ]
0.0
-1
Calculate the distance in km between two points given in decimal degrees. Uses the Haversine formula, i.e. assumes a perfectly spherical Earth.
import math

def Distance(VCoords, SCoords):
    # Convert decimal degrees to radians
    lat1, lon1, lat2, lon2 = map(math.radians, VCoords + SCoords)
    # Apply Haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
    c = 2 * math.asin(math.sqrt(a))
    # Mean Earth radius = 6371 km
    return 6371 * c
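A minimal usage sketch of the Distance function above, assuming each coordinate pair is ordered [lat, lon]; the Dublin and London values are approximate and purely illustrative:

dublin = [53.3498, -6.2603]  # approximate [lat, lon] in decimal degrees (illustrative)
london = [51.5074, -0.1278]  # approximate [lat, lon] in decimal degrees (illustrative)
print(round(Distance(dublin, london), 1))  # ~463 km under the spherical-Earth assumption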
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def haversine(lat1, lon1, lat2, lon2):\n\t\t # convert decimal degrees to radians \n\t\t lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t\t # haversine formula \n\t\t dlon = lon2 - lon1 \n\t\t dlat = lat2 - lat1 \n\t\t a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\t\t c = 2 * asin(sqrt(a)) \n\t\t km = 6367 * c\n\t\t return km", "def spherical_distance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6373 * c\n km = '%d' % km\n return float(km)", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def calcDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km * 1000", "def dist_between(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * asin(sqrt(a))\n m = 6367 * c * 1000\n return m", "def distance(lat0, lng0, lat1, lng1):\n # convert decimal degrees to radians \n lat0, lng0, lat1, lng1 = map(radians, [lat0, lng0, lat1, lng1])\n # haversine formula \n dlng = lng1 - lng0 \n dlat = lat1 - lat0 \n a = sin(dlat/2)**2 + cos(lat0) * cos(lat1) * sin(dlng/2)**2\n c = 2 * asin(sqrt(a)) \n m = 6367000 * c\n return m", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a))\r\n km = 6367 * c\r\n return km", "def haversine(lon1, lat1, lon2, lat2):\n\t# convert decimal degrees to radians \n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t# haversine formula \n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tkm = 6367 * c\n\n\n\treturn km", "def haversine_dist(lat1, lng1, lat2, lng2):\n R = 6371 # mean radius of earth in kms\n # convert decimal degrees to 
radians \n lng1, lat1, lng2, lat2 = map(radians, [lng1, lat1, lng2, lat2])\n # haversine formula \n dlon = lng2 - lng1 \n dlat = lat2 - lat1 \n a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n km = R * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n # haversine formula \r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a)) \r\n km = 6367 * c\r\n return km", "def harversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # harversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2.)**2. + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2.)**2.\n c = 2. * math.asin(math.sqrt(a))\n km = 6371. * c # radius of earth\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n m = 1000. * km\n return m", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2): \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \r\n #print 34\r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \r\n c = 2 * atan(sqrt(a)/sqrt(1-a)) \r\n r = 6371 \r\n d=c * r\r\n #print type(d)\r\n return d", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1 = math.radians(lon1)\n lat1 = math.radians(lat1)\n lon2 = math.radians(lon2)\n lat2 = math.radians(lat2)\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n hav_a = (math.sin(dlat/2)**2\n + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2)\n hav_c = 2 * math.asin(math.sqrt(hav_a))\n\n # 6367 km is the radius of the Earth\n dist_km = 6371 * hav_c\n return dist_km", "def calc_distance_two_points(lat_from, long_from, lat_to, long_to):\n distance_in_km = haversine(\n (lat_from, long_from),\n (lat_to, long_to),\n unit='km')\n\n return distance_in_km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n # haversine formula\n dlon = lon2 - 
lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def haversine(lat2, lon2):\n\n lat1 = 53.342998628\n lon1 = -6.256165642\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n\n return km", "def haversine(self,lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km*1000", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n # Radius of earth in kilometers is 6371\n m = 6371000* c #meters\n return m", "def haversine(lon1, lat1, lon2, lat2):\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371000 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def get_spherical_distance(lat1,lat2,long1,long2):\n lat1,lat2,long1,long2= float(lat1),float(lat2),float(long1),float(long2)\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def haversine(lon1, lat1, lon2, lat2):\n # https://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "def haversine(lon1, lat1, lon2, lat2):\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return (km * 1000)", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = \\\n [math.radians(deg) for deg in [lon1, lat1, lon2, lat2]]\n # haversine formula \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n meters = 6367 * c * 1000\n return meters", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km", "def get_spherical_distance(lat1,lat2,long1,long2):\n q=radians(lat2-lat1)\n 
r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def Haversine(c1, c2):\n lat1 = float(c1.split(\",\")[0])\n lon1 = float(c1.split(\",\")[1])\n lat2 = float(c2.split(\",\")[0])\n lon2 = float(c2.split(\",\")[1])\n\n\n #convert degrees into radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3959.87433 # Radius of earth in miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371* c\n return km", "def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km", "def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:\n lat1, lon1, lat2, lon2, = map(radians, [lat1, lon1, lat2, lon2])\n # average earth radius\n R = 6372.8\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n sin_lat_squared = sin(dlat * 0.5) * sin(dlat * 0.5)\n sin_lon_squared = sin(dlon * 0.5) * sin(dlon * 0.5)\n computation = asin(sqrt(sin_lat_squared + sin_lon_squared * cos(lat1) * cos(lat2)))\n d = 2 * R * computation\n return d", "def calc_distance_haversine(coord1, coord2):\n lat1, lon1 = coord1\n lat2, lon2 = coord2\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def haversine_np(lon1, lat1, lon2, lat2):\n\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat/2.0)**2 + np.cos(lat1) * 
np.cos(lat2) * np.sin(dlon/2.0)**2\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n\n return km", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n m = 6371 * c * 1000\n return m", "def haversine(lat1, lon1, lat2, lon2, radius=6371):\n from math import radians, sin, cos, sqrt, asin\n dLat = radians(lat2 - lat1)\n dLon = radians(lon2 - lon1)\n lat1 = radians(lat1)\n lat2 = radians(lat2)\n c = 2 * asin(sqrt(sin(dLat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dLon / 2) ** 2))\n return radius * c * 1000 # return in meters", "def haversine(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = float(lon1), float(lat1), float(lon2), float(lat2)\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n m = 6371 * c * 1000\n return m", "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km", "def dist(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversin(lat1, lon1, lat2, lon2):\n r = 3956.545 # Radius of the Earth in miles\n\n # Conver to radians\n lat1 = np.pi/180*lat1\n lon1 = np.pi/180*lon1\n lat2 = np.pi/180*lat2\n lon2 = np.pi/180*lon2\n\n # Haversin formula\n d = 2*r*np.arcsin(np.sqrt(\\\n np.sin((lat2 - lat1)/2)**2 + \\\n np.cos(lat1) * np.cos(lat2)*\\\n np.sin((lon2 - lon1)/2)**2))\n return d", "def distance_between(lat_1, lon_1, lat_2, lon_2):\n lat_1, lon_1 = math.radians(lat_1), math.radians(lon_1)\n lat_2, lon_2 = math.radians(lat_2), math.radians(lon_2)\n theta = lon_1 - lon_2\n dist = math.sin(lat_1)*math.sin(lat_2) + math.cos(lat_1)*math.cos(lat_2)*math.cos(theta)\n dist = math.acos(dist)\n dist = math.degrees(dist)\n dist = dist * 69.06 # 69.09 = circumference of earth in miles / 360 degrees\n return dist", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n metres = km * 1000\n return metres", "def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c 
= 2 * asin(sqrt(a))\n km = 6367 * c\n return convert_km_to_mi(km)", "def calculateDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n\n # haversine formula\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(point_1, \n point_2):\n # Specify the radius of the Earth in kilometers\n earth_radius = 6372.8\n # Extract latitudes and longitudes from the provided points\n latitude_1 = point_1[0]\n latitude_2 = point_2[0]\n longitude_1 = point_1[1]\n longitude_2 = point_2[1]\n # Convert the latitudes and longitudes to radians\n latitude_1, longitude_1 = np.radians((latitude_1, longitude_1))\n latitude_2, longitude_2 = np.radians((latitude_2, longitude_2))\n # Calculate the differences between latitudes in radians\n latitude_difference = latitude_2 - latitude_1\n # Calculate the differences between longitudes in radians\n longitude_difference = longitude_2 - longitude_1\n # Calculate the haversine distance between the coordinates\n step_1 = np.square(np.sin(np.multiply(latitude_difference, 0.5)))\n step_2 = np.square(np.sin(np.multiply(longitude_difference, 0.5)))\n step_3 = np.multiply(np.cos(latitude_1), np.cos(latitude_2))\n step_4 = np.arcsin(np.sqrt(step_1 + np.multiply(step_2, step_3)))\n haversine_distance = np.multiply(np.multiply(2, earth_radius), step_4)\n # Return the computed haversine distance for the coordinates\n return haversine_distance", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2.)**2. + cos(lat1) * cos(lat2) * sin(dlon/2.)**2.\n c = 2. * asin(sqrt(a)) \n\n # 6378.1 km is the radius of the Earth = 6378100 m\n m = 6378100. 
* c\n return m", "def calc_dist(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n mtr = 6371000 * c\n return mtr", "def haversine(lat1, lon1, lat2, lon2):\r\n # convert decimal degrees to radians\r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\r\n c = 2 * asin(sqrt(a))\r\n r = 3959 # Radius of earth in miles\r\n return c * r", "def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float", "def calculate_distance(point1, point2):\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3956 # Radius of earth in kilometers. Use 3956 for miles\n return c * r * 1.60934", "def haversine(lon1, lat1, lon2, lat2):\n\t# convert decimal degrees to radians \n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n\t# haversine formula\n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tr = 6371000 # Radius of earth in meters. Use 3956 for miles\n\treturn c*r", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 \n # 将十进制度数转化为弧度 \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \n \n # haversine公式 \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \n c = 2 * asin(sqrt(a)) \n r = 6371 # 地球平均半径,单位为公里 \n return c * r", "def haversine(lat1, lat2, lon1, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [float(lon1), float(lat1), float(lon2), float(lat2)])\n \n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def dist(loc1, loc2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [loc1[1], loc1[0], loc2[1], loc2[0]])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "def distance(lat1, lon1, lat2, lon2):\r\n earth_radius=3959.0 #miles\r\n if lat1==lat2 and lon1==lon2:\r\n dst=0\r\n else:\r\n dst = acos(\r\n (sin(radians(lat1)) * sin(radians(lat2))) +\r\n (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2)))\r\n ) * earth_radius\r\n return dst", "def haversine(p1, p2):\n # Convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [p1[0], p1[1], p2[0], p2[1]])\n \n # Haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n r = 6371 \n \n return c * r", "def coord_dist_meters(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n r = 6371000 # Radius of earth in meters. Use 3956 for miles\n return c * r", "def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n km = 2 * 6367 * math.asin(math.sqrt(a))\n mi = 0.621371 * km\n return mi", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n\tdirectionDict = {'North':[0,90],'East':[90,180],'South':[180,270],'West':[270,360]}\n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n\t# haversine formula \n\tdlon = lon2 - lon1 \n\tdlat = lat2 - lat1 \n\ta = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n\tc = 2 * asin(sqrt(a)) \n\tr = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n\n\t\"\"\"Calculate bearing (angle) between 2 coordinates\"\"\"\n\tbearing = atan2(sin(lon2-lon1)*cos(lat2), cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon2-lon1))\n\tbearing = degrees(bearing)\n\tbearing = (bearing + 360) % 360\n\n\tdirection = \"\"\n\tfor k,v in directionDict.iteritems():\n\t\tif bearing >= v[0] and bearing < v[1]:\n\t\t\tdirection = k\n\n\treturn c * r, bearing, direction", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n lon1 = float(lon1)\n lat1 = float(lat1)\n lon2 = float(lon2)\n lat2 = float(lat2)\n\n if lon1 == 0 or lat1 == 0:\n return float(999999999)\n\n if lon2 == 0 or lon2 ==[]:\n return float(999999999)\n\n if lat2 == 0 or lat2 ==[]:\n return float(999999999)\n\n\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000", "def distance(lat1, lon1, lat2, lon2):\r\n radius = 6373 * 1000\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n return radius * c", "def distance(lat1, lon1, lat2, lon2):\n lon1, lat1 = math.radians(lon1), math.radians(lat1)\n lon2, lat2 = math.radians(lon2), math.radians(lat2)\n a = (math.sin((lat2 - lat1) / 2) ** 2 +\n math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = 6371000 * c\n\n return d", "def haversine(lon1, lat1, lon2, lat2):\n \n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n \n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 #radius of earth in miles mean of poles and equator radius\n return c * r", "def get_euclidian_distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "def haversine(\n lat1: float, lon1: float, lat2: float, lon2: float, *, unit: str = \"metric\"\n) -> float:\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n calc_a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n calc_c = 2 * asin(sqrt(calc_a))\n\n if unit == \"metric\":\n return AVG_EARTH_RADIUS_METRIC * calc_c\n return AVG_EARTH_RADIUS_IMPERIAL * calc_c", "def haversin(lat1, lon1, lat2, lon2):\n r = 3956.545\n # Conver to radians\n lat1 = np.pi/180*lat1\n lon1 = np.pi/180*lon1\n lat2 = np.pi/180*lat2\n lon2 = np.pi/180*lon2\n\n\n d = 2*r*np.arcsin(np.sqrt(\\\n np.sin((lat2-lat1)/2)**2 + \\\n np.cos(lat1)*np.cos(lat2)*\\\n np.sin((lon2-lon1)/2)**2))\n return d", "def haversine(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth 
in kilometers. Use 3956 for miles\n return c * r", "def distance_in_meters(coord1, coord2):\n return vincenty(coord1, coord2).meters", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r * 1000", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 # Radius of earth in miles. Use 6371 for kms\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n from math import radians, cos, sin, asin, sqrt\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n\n Re = 6378.137\n\n # convert decimal degrees to radians\n deg2rad = np.pi / 180.\n lon1 = np.array(lon1) * deg2rad\n lat1 = np.array(lat1) * deg2rad\n lon2 = np.array(lon2) * deg2rad\n lat2 = np.array(lat2) * deg2rad\n\n if lon2.shape:\n N = lon2.shape[0]\n lon1 = np.repeat(lon1, N)\n lat1 = np.repeat(lat1, N)\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat / 2.)**2. + np.cos(lat1) * \\\n np.cos(lat2) * np.sin(dlon / 2.)**2.\n c = 2. * np.arcsin(np.sqrt(a))\n km = Re * c\n return km", "def aversine(lon1, lat1, lon2, lat2):\n\n lon1 = float(lon1)\n lon2 = float(lon2)\n lat1 = float(lat1)\n lat2 = float(lat2)\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n meters = 6356988 * c\n\n return meters", "def haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r * 1000", "def haversine(lon1, lat1, lon2, lat2):\n from math import radians, cos, sin, asin, sqrt\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n \n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 3956 # Radius of earth in MILES. Use 6371 for KM\n return round(c * r, 4)", "def haversine(gps1, gps2):\n (lon1, lat1) = gps1\n (lon2, lat2) = gps2\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(np.sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(p1=None, p2=None):\n lat1, lon1 = p1\n lat2, lon2 = p2\n p = pi/180\n a = 0.5 - cos((lat2-lat1)*p)/2 + cos(lat1*p) * cos(lat2*p) * (1-cos((lon2-lon1)*p))/2 \n return 12742 * asin(sqrt(a)) #2*R*asin...", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n\n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def great_circle(lat_1, long_1, lat_2, long_2):\n long_1 = m.radians(long_1)\n lat_1 = m.radians(lat_1)\n long_2 = m.radians(long_2)\n lat_2 = m.radians(lat_2)\n\n d = 2 * 6367.45 * m.asin(\n m.sqrt(haversine(lat_2 - lat_1)\n + m.cos(lat_1)*m.cos(lat_2) *\n haversine(long_2 - long_1)))\n return d", "def haversine(lon1, lat1, lon2, lat2):\r\n # convert decimal degrees to radians \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula \r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a)) \r\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\r\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 * 1000 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def calculate_distance(x: float, y: float) -> float:\n # return geopy.distance.vincenty(x, y).km\n R = 6370\n lat1 = radians(x[0]) #insert value\n lon1 = radians(x[1])\n lat2 = radians(y[0])\n lon2 = radians(y[1])\n\n dlon = lon2 - lon1\n dlat = lat2- lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n return distance", "def harversine_distance(self, other):\n\n lat1, lon1, lat2, lon2 = (\n a/180*pi for a in [self.stop_lat, self.stop_lon, other.stop_lat, other.stop_lon])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon/2) ** 2\n c = 2 * asin(min(1, sqrt(a)))\n d = 3956 * 1609.344 * c\n return d", "def get_distance(lat1, lon1, lat2, lon2) -> float:\n # Earth radius in meters\n radius = 6371000\n\n # Degress to radian\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n\n # Deltas\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # Calculate distance\n arch = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n arch_sin = 2 * np.arcsin(np.sqrt(arch))\n\n return radius * arch_sin", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine_distance(df, lat1, long1, lat2, long2):\n r = 6371 # average radius of Earth in kilometers\n phi1 = np.radians(df[lat1])\n phi2 = np.radians(df[lat2])\n\n delta_phi = np.radians(df[lat2] - df[lat1])\n delta_lamda = np.radians(df[long2] - df[long1])\n\n a = np.sin(delta_phi/2) ** 2 + np.cos(phi1) * np.cos(phi2) * \\\n np.sin(delta_lamda / 2) * np.sin(delta_lamda / 2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\n d = (r * c) # in kilometers\n\n return d", "def get_distance_from_point(long1, lati1, long2, lati2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [long1, lati1, long2, lati2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def _greatCircleDistance(self, long1, lat1, long2, lat2):\n # convert decimal degrees to radians \n long1, lat1, long2, lat2 = map(radians, [float(long1), float(lat1), float(long2), float(lat2)])\n # haversine formula \n dlon = long2 - long1\n #print(long2)\n #print(long1) \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n #print(c*r)\n return c * r" ]
[ "0.7997606", "0.79383963", "0.7895904", "0.78638697", "0.7796029", "0.77701026", "0.77680063", "0.77678233", "0.77571", "0.7750529", "0.7741123", "0.77320206", "0.7727604", "0.7691649", "0.7687481", "0.76833457", "0.7681609", "0.7681609", "0.7670106", "0.7646074", "0.76412785", "0.7596498", "0.7574836", "0.7574301", "0.7571678", "0.75628203", "0.7549929", "0.75428736", "0.75424343", "0.7537846", "0.75282747", "0.7517322", "0.7516897", "0.7509961", "0.74985313", "0.7494609", "0.7491288", "0.7488611", "0.74842554", "0.7473674", "0.7472633", "0.7456921", "0.7448881", "0.74468637", "0.74297804", "0.7427669", "0.7426218", "0.7424036", "0.74132895", "0.7396466", "0.73826706", "0.7380429", "0.73768914", "0.73754185", "0.7360807", "0.73606956", "0.73528194", "0.7324643", "0.7321173", "0.73208445", "0.73158014", "0.73154396", "0.7311118", "0.730612", "0.73048425", "0.729951", "0.72905934", "0.7287754", "0.72857875", "0.72821677", "0.72692406", "0.7260232", "0.7260144", "0.7257958", "0.7256541", "0.7254924", "0.7251648", "0.72493637", "0.72374105", "0.72292376", "0.72205913", "0.72167385", "0.72133994", "0.72120255", "0.7210468", "0.72092336", "0.7209177", "0.7204808", "0.720447", "0.72020984", "0.72020614", "0.7199514", "0.71980405", "0.7195529", "0.7183223", "0.71714336", "0.71700704", "0.7160161", "0.7159867", "0.7157625", "0.7156295" ]
0.0
-1
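The negatives above are all variants of the haversine great-circle formula, differing mainly in the Earth radius used (6371 km vs. 3956 miles) and the returned unit. For reference, a minimal self-contained sketch of the same formula; the function name and the coordinates in the example call are invented for illustration:

from math import radians, sin, cos, asin, sqrt

def haversine_km(lon1, lat1, lon2, lat2):
    # great-circle distance between two points given in decimal degrees
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 2 * asin(sqrt(a)) * 6371  # use 3956 here instead for miles

print(round(haversine_km(2.35, 48.86, 13.40, 52.52), 1))  # Paris -> Berlin, roughly 877 km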
Sorts a list of tuples based on the first value in each.
def SortTupleList(TupleList):
    SortedList = []
    for i in range(0, len(TupleList)):
        Pivot = 0
        # walk forward until the insertion point for this tuple's first value
        while Pivot < len(SortedList) and SortedList[Pivot][0] <= TupleList[i][0]:
            Pivot += 1
        SortedList.insert(Pivot, TupleList[i])
    return SortedList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_list_of_tuples(list):\n list.sort(key=lambda x: x[0])\n return list", "def tupleListSort(tupleList):\n tupleList.sort(key=lambda y: y[0].lower())", "def sort_fst(xs):\n return sorted(xs, key=lambda pair: pair[0])", "def sort_prices(list_of_tuples):\n list_of_tuples.sort(key = get_price, reverse = True)\n return list_of_tuples", "def tuple_sorted(a):\r\n if ((isinstance(a, int) == True) or (isinstance(a, str) == True)):\r\n return a\r\n if ((isinstance(a[0], int) == True) or (isinstance(a[0], str) == True)):\r\n return sorted(a)\r\n else:\r\n w = []\r\n for b in a:\r\n w.append(tuple(tuple_sorted(b)))\r\n return tuple(sorted(tuple(w)))", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def sort_1(l):\n pass", "def __sort(self, _list, _index, desc, pop_first_element=False):\n if _index != 0:\n _list = [(x[_index], x) for x in _list]\n \n _list.sort()\n \n if desc:\n _list.reverse()\n\n if _index != 0 or pop_first_element: \n _list = [x[1] for x in _list]\n\n return _list", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def sort_by_return(list_of_tuples):\n list_of_tuples = sorted(list_of_tuples, key=lambda item: item[0])\n left_side = list_of_tuples[0:2]\n right_side = list_of_tuples[2:4]\n left_side = sorted(left_side, key=lambda item: item[1])\n right_side = sorted(right_side, key=lambda item: item[1])\n result = left_side + right_side\n return result", "def to_sorted_points(x):\n return tuple(sorted(x))", "def process_tuples_sorted(self):\n return sorted(self.process_tuples, key=lambda process_tuple: process_tuple[0].name)", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n temp = lst\n switched = True\n while switched:\n switched = False\n for i in range(len(temp) - 1):\n if compare(temp[i], temp[i + 1]) == 1:\n temp[i], temp[i + 1] = temp[i + 1], temp[i]\n switched = True\n\n return temp", "def sorting(recommendation: List[Tuple[str, int]]) -> None:\n \n for tup in range(len(recommendation)):\n score = recommendation[tup][1]\n alpha = recommendation[tup][0]\n for j in range(tup + 1, len(recommendation)):\n if recommendation[j][1] > score or \\\n (recommendation[j][1] == score and recommendation[j][0] < alpha):\n recommendation[j], recommendation[tup] = recommendation[tup], \\\n recommendation[j]", "def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:\n for i in range(1, len(lst)): #loops through each element starting at the second one\n for j in range(i, 0, -1): #loops through each element coming before i starting at i and going backwards\n if compare(lst[j], lst[j-1]) < 0: #checks to see if the previous element is smaller than the current (by saying <0 we keep the sort stable as well)\n lst[j], lst[j-1] = lst[j-1], lst[j] #if they are, we switch them\n else:\n break #if they are not, we know that the element is in its proper place\n return lst", "def sort_0(l):\n l.sort()", "def sort(lst):\n \"*** YOUR CODE HERE ***\"\n if len(lst) <= 0:\n return []\n return [min(lst)] + sort(remove_first(lst, min(lst)))", "def test_signed_sort(self):\r\n\r\n # an empty list must be returned when an empty list needs to be sorted\r\n self.assertEqual(signed_natsort([]), [])\r\n\r\n # tuples that can be sorted by type-casting the first element\r\n test_list = [('9', 'SampleA'), ('-1', 'SampleD'), ('7', 'SampleC'),\r\n ('-2', 'SampleE'), ('-0.11',\r\n 'SampleF'), ('17.11', 'SampleB'),\r\n ('100', 'SampleG'), ('13', 'SampleH')]\r\n expected_result = [('-2', 'SampleE'), 
('-1', 'SampleD'),\r\n ('-0.11', 'SampleF'), ('7',\r\n 'SampleC'), ('9', 'SampleA'),\r\n ('13', 'SampleH'), ('17.11', 'SampleB'), ('100', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # tuples that must be sorted alphabetically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('Hydra', 'SampleF'),\r\n ('Carina', 'SampleB'), ('Orion', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('Auriga', 'SampleC'), ('Carina', 'SampleB'),\r\n ('Cepheus', 'SampleD'), ('Cygnus',\r\n 'SampleA'), ('Grus', 'SampleE'),\r\n ('Hydra', 'SampleF'), ('Lynx', 'SampleH'), ('Orion', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case, tuples will be sorted alpha-numerically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('-0.11', 'SampleF'),\r\n ('17.11', 'SampleB'), ('100', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('17.11', 'SampleB'), ('100', 'SampleG'),\r\n ('-0.11', 'SampleF'), ('Auriga',\r\n 'SampleC'), ('Cepheus', 'SampleD'),\r\n ('Cygnus', 'SampleA'), ('Grus', 'SampleE'), ('Lynx', 'SampleH')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case just a list\r\n test_list = ['foo', 'bar', '-100', '12', 'spam', '4', '-1']\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # list of elements that can be type-casted\r\n test_list = ['0', '1', '14', '12', '-15', '4', '-1']\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed dict case\r\n test_dict = {\r\n 'foo': 'a', 'bar': 'b', '-100': '1', '12': '11', 'spam': 'q',\r\n '4': '11', '-1': 'e'}\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)\r\n\r\n # dict where the keys can be type-casted\r\n test_dict = {\r\n '0': 'foo', '1': 'bar', '14': 'stand', '12': 'eggs', '-15': 'q',\r\n '4': 'b', '-1': 'h'}\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)", "def sortn(xs):\n return sorted(xs, key=sortnkey)", "def sort_list(list, key):\r\n list.sort(lambda x,y: cmp(key(x), key(y))) # Python < 2.4 hack\r\n return list", "def bubble_sort(data_list_or_tuple):\n data_list = list(data_list_or_tuple)\n for count, _ in enumerate(data_list, 1):\n for x in range(len(data_list)-count):\n if data_list[x] > data_list[x+1]:\n data_list[x], data_list[x+1] = data_list[x+1], data_list[x]\n return data_list", "def _sort_on_first_sequence(x: Tensor, y: Tensor) ->Tuple[Tensor, Tensor]:\n y = torch.clone(y)\n x, y = x.T, y.T\n x, perm = x.sort()\n for i in range(x.shape[0]):\n y[i] = y[i][perm[i]]\n return x.T, y.T", "def sort_tuple(tup, n=2):\n return tuple(sorted(tup, key=lambda t: t[:n]))", "def sort_priority(todo_list):\n for index in range(len(todo_list)):\n smallest_index = find_index_of_smallest_after(todo_list, index)\n swap_values_at_indexes(todo_list, index, smallest_index)", "def sort(self,xy):\n xy.sort()\n #print xy\n x0=xy[0][0] # x of first tuple\n listy=[] # list of list of y values for given 
x\n listx=[] # list of x values\n ll=[]\n for i in xy:\n if(i[0] == x0): # change of x\n ll.append(i[1])\n else:\n listy.append(ll)\n listx.append(x0)\n ll=[]\n ll.append(i[1])\n x0=i[0]\n listy.append(ll)\n listx.append(x0)\n return listx,listy", "def s_sort(l):\r\n for x in range(len(l)): # while 1st index in range of length of list...\r\n min_i = x # set 1st index as current min index, to be referenced/compared against\r\n\r\n for y in range(x + 1, len(l)): # while 2nd index in range of 1st+1 & length of list...\r\n if l[min_i] > l[y]: # if 1st is greater than 2nd, set 2nd as new min index\r\n min_i = y\r\n\r\n l[x], l[min_i] = l[min_i], l[x] # swap new min with original reference\r", "def selection_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tfor i in range(0, len(sorted_list), 1):\r\n\t\tmin_index = i\r\n\t\tfor j in range(i + 1, len(sorted_list), 1):\r\n\t\t\tif sorted_list[j] < sorted_list[min_index]:\r\n\t\t\t\tmin_index = j\r\n\t\tsorted_list[i], sorted_list[min_index] = sorted_list[min_index], sorted_list[i]\r\n\treturn sorted_list", "def ssort(mylist):\n comparisionCount = 0\n for position in range(len(mylist)):\n # Assume the current position is the minimum element\n minPosition = position\n for nextPos in range(position+1,len(mylist)):\n comparisionCount = comparisionCount + 1 # Comparing the values in the positions\n if (mylist[minPosition]>mylist[nextPos]):\n # Remembering the position of the minimum value\n minPosition = nextPos\n\n comparisionCount = comparisionCount + 1 #Comparing the positions\n if position != minPosition:\n # Swap the numbers\n mylist[minPosition],mylist[position] = mylist[position],mylist[minPosition]\n return (mylist,comparisionCount)", "def sort(List):\n if not isinstance(List, (list, tuple)):\n raise TypeError(\"Argument must be list or tuple\")\n List = list(List).copy()\n sorted = False\n iter = len(List) - 1\n while (sorted == False):\n sorted = True\n for i in range(iter):\n if List[i] > List[i+1]:\n List[i],List[i+1] = List[i+1],List[i]\n sorted = False\n iter -= 1\n\n return List", "def qsort(l):\n if len(l) <= 1:\n return l\n return qsort([lt for lt in l[1:] if lt < l[0]]) + l[0:1] + \\\n qsort([ge for ge in l[1:] if ge >= l[0]])", "def natsorted(lst):\n return sorted(lst, key=natsort_key)", "def reorder(l: List[Any]) -> List[Any]:\n sorted_list: List[Any] = list()\n sorted_list.append(l[0])\n for i in range(1, len(l)):\n index: int = 0\n while index < i and l[i] > sorted_list[index]:\n index += 1\n sorted_list.insert(index, l[i])\n return sorted_list", "def word_count_sort(word_count_list):\n\n for index in range(1, len(word_count_list)):\n # initialize pointers\n value = word_count_list[index] # starts at the tuple in index 1\n position = index - 1 # initialize to start at 0\n\n # move items to a higher index position while their value is less than the value at the next index\n # compare values in tuple[1] but swap entire tuple\n while position >= 0 and word_count_list[position][1] < value[1]:\n word_count_list[position + 1] = word_count_list[position] # swap the tuple at position into next index\n position -= 1 # decrement to fill lower index and break loop\n\n word_count_list[position + 1] = value # move higher number left one index\n\n return word_count_list", "def sort_4(l):\n l = list(set(l))\n l.sort()", "def sort(student_list):\n for i in range(len(student_list) - 1):\n for x in range(len(student_list) - 1):\n if student_list[x] > student_list[x + 1]:\n student_list[x], student_list[x + 1] = \\\n student_list[x + 
1], student_list[x]", "def _sorted_items(x):\n return sorted(x.items(), key=lambda x: x[0])", "def sort_by_another(to_sort, basis):\n return [x for (y, x) in sorted(zip(basis, to_sort), key=lambda pair: pair[0])]", "def sort(xs):\n for i in range(0, len(xs) - 1):\n for j in range(i + 1, len(xs)):\n if xs[j] > xs[i]:\n xs[j], xs[i] = xs[i], xs[j]", "def sort(points):\n if len(points) == 0:\n return []\n \n starting_vertex = min(points)\n reference_point = starting_vertex + Point2D(0, 1)\n \n return sorted(points, key=partial(\n get_angle_and_distance, point_2=starting_vertex, point_3=reference_point\n ))", "def natsort(lst):\n lst.sort(key=natsort_key)", "def selection_sort(L):\n for i in range(len(L)):\n # Find the index of the smellest item in L[i:] and swap that item with the item at index i.\n\n index_of_smallest = get_index_of_smallest(L,i)\n L[index_of_smallest], L[i] = L[i], L[index_of_smallest]", "def anythingSort(L):\n return internalSort(L, 0, len(L) - 1)", "def sort_list(self,list_):\r\n list_.sort()", "def _sorted_occurrence_tuples(\n occurrences: Dict[str, List[int]]\n ) -> List[Tuple[str, int]]:\n return sorted(\n ((raw, idx) for raw in occurrences.keys() for idx in occurrences[raw]),\n # Sort first by position, then by lexical (for stability)\n key=lambda x: (x[1], x[0]),\n )", "def sorting(my_list):\n for indx in range(1,len(my_list)):\n i=indx\n while i>0:\n if my_list[i]<my_list[i-1]:\n temp=my_list[i-1]\n my_list[i-1]=my_list[i]\n my_list[i]=temp\n i=i-1\n return my_list", "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def sort(packed, ref, reverse=True):\r\n assert (isinstance(packed, tuple) or isinstance(packed, list)) and isinstance(ref, list)\r\n packed = [ref] + [range(len(ref))] + list(packed)\r\n sorted_packed = [list(t) for t in zip(*sorted(zip(*packed), reverse=reverse))]\r\n return tuple(sorted_packed[1:])", "def qsort(l):\n if len(l) < 2:\n return l\n h, t = l[0], l[1:]\n return qsort([u for u in t if u < h]) + [h] + qsort([u for u in t if u >= h])", "def sort_suggestions(\n suggestions: List[Tuple[Set[str], float]]\n) -> List[Tuple[Set[str], float]]:\n confidence_list = [suggestion[1] for suggestion in suggestions]\n sort_index = sorted(range(len(confidence_list)), key=lambda k: confidence_list[k])\n # Inverse the sort\n sort_index = sort_index[::-1]\n return [suggestions[i] for i in sort_index]", "def sortList(lst, reverse=False, key=None):\n return sorted(lst, key=key, reverse=reverse)", "def sort_data(data):\n data.sort(key=itemgetter(3,2))\n return data", "def sort_3(l):\n l.sort(reverse=True)", "def sort(packed, ref, reverse=True):\n assert (isinstance(packed, tuple) or isinstance(packed, list)) and isinstance(ref, list)\n packed = [ref] + [range(len(ref))] + list(packed)\n sorted_packed = [list(t) for t in zip(*sorted(zip(*packed), reverse=reverse))]\n return tuple(sorted_packed[1:])", "def langsort_tuples (lst, index, lang=None):\n\n reset_locale = _set_lang_locale(lang)\n lst.sort(lambda x, y: locale.strcoll(x[index], y[index]))\n reset_locale()", "def sort_points(*pts):\n npts = len(pts)\n points = []\n angles = []\n #sort args by angle relative to x, c.c.w\n def _angle(v):\n # cartesian angle is always btwn 0 and 180\n angle = cartesian_angle(v,[1.,0.])\n if (v[1] < 0.):\n return 360. 
- angle\n else:\n return angle\n for v in pts:\n v = num.array(v[0:2])\n an = _angle(v)\n j = 0\n while j < npts -1:\n if j > len(points)-1: break\n if an < angles[j]: break\n else: j = j + 1\n points.insert(j,v)\n angles.insert(j,an)\n return (points,angles)", "def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])", "def insertionSort(list):", "def selection_sort(unsorted_list):\n if len(unsorted_list) <= 1:\n return unsorted_list\n for index in range(len(unsorted_list)):\n lowest_number = index\n for i in range(index, len(unsorted_list)):\n if unsorted_list[lowest_number] > unsorted_list[i]:\n lowest_number = i\n unsorted_list[index], unsorted_list[lowest_number] = unsorted_list[lowest_number], unsorted_list[index]\n return unsorted_list", "def sort(\n tuples: Collection[Tuple[_T, _T]],\n allitems: Collection[_T],\n deterministic_order: bool = True,\n) -> Iterator[_T]:\n\n for set_ in sort_as_subsets(tuples, allitems):\n yield from set_", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def quick_sort(unsorted_list):\r\n\tsorted_list = list(unsorted_list)\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(sorted_list) > 1:\r\n\t\tpivot = sorted_list[0]\r\n\t\tfor item in sorted_list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quick_sort(less) + equal + quick_sort(greater)\r\n\telse:\r\n\t\treturn sorted_list", "def _cmplx_sort(p):\n indx = cupy.argsort(cupy.abs(p))\n return cupy.take(p, indx, 0), indx", "def selection_sort(input_list):\n for i in range(len(input_list)-1):\n ## By slicing the list incrementally, swap first element with lowest\n ## element of sliced list\n temp_item = input_list[i]\n smallest_index = smallest_elem_index(input_list[i:])\n input_list[i] = input_list[smallest_index+i]\n input_list[smallest_index+i] = temp_item\n return input_list", "def signed_natsort(data):\r\n\r\n # list is empty, do nothing\r\n if not data:\r\n return data\r\n\r\n # deal with non-[tuple, dict, list] types of data\r\n if not all([isinstance(element, tuple) or isinstance(element, list) or\r\n isinstance(element, dict) for element in data]):\r\n try:\r\n return sorted(data, key=float)\r\n except ValueError:\r\n return natsort(data)\r\n\r\n # deal with tuples type of data, the first element can be a real number or\r\n # a string, the second element is a string that won't be accounted\r\n try:\r\n return sorted(data, key=lambda tup: float(tup[0]))\r\n except ValueError:\r\n return natsort(data)", "def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))", "def bubble_sort(first):\n # iterate len(lst) times\n for i in range(len(first)):\n\n # integrate [len(lst) - i - 1] times\n for j in range(len(first) - i - 1):\n\n # sort two number if not sorted\n if first[j] > first[j + 1]:\n # swap element at j with element at j + 1\n # and element ad j + 1 with element j\n first[j], first[j + 1] = first[j + 1], first[j]", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def front_x(some_list):\n #This funstion will sort every element in list but every element that start with \"x\" come first.\n new_list = []\n new_list_x = []\n for k in some_list:\n #print(k)\n if k.find(\"x\") == 0:\n #print(k.find(\"x\"))\n new_list_x.append(k) \n else:\n new_list.append(k)\n return sorted(new_list_x) + 
sorted(new_list)", "def sort2(x, y):\n if y < x:\n return y, x\n return x, y", "def sort_vertices(*args, **kwargs):\n return sorted(*args, **kwargs)", "def reOrderListOfListByFirstMember(listOfList=None):\n\tfirstList = listOfList[0]\n\tx_ar = numpy.array(firstList, numpy.float)\n\t#sort x_ar and y_ar must be in the order of x_ar\n\tindexOfOrderList = numpy.argsort(x_ar)\n\treturnListOfList = []\n\tfor ls in listOfList:\n\t\tar = numpy.array(ls, numpy.float)\n\t\tar = ar[indexOfOrderList]\n\t\treturnListOfList.append(ar)\n\treturn PassingData(listOfList=returnListOfList)", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_variables(variables):\n return tuple(sorted(variables, key=lambda v: (v.rank, v.shift)))", "def mech_tuples_sorted(self):\n return sorted(self.mech_tuples, key=lambda mech_tuple: mech_tuple[0].name)", "def selection_sort(my_list):\n if len(my_list) < 2:\n return my_list\n for index in range(0, len(my_list)-1, +1):\n index_of_min = index\n for location in range(index, len(my_list)):\n if my_list[location] < my_list[index_of_min]:\n index_of_min = location\n\n temp = my_list[index]\n my_list[index] = my_list[index_of_min]\n my_list[index_of_min] = temp\n\n return my_list", "def custom_sort(x, y):\n if x[1] == y[1]:\n return 1 if x[0] > y[0] else -1\n return cmp(y[1], x[1])", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result", "def sort(values, comp_func):\n\n \"\"\"\n 昇順\n comp_func = lambda a, b: a if a<b else b\n\n 降順\n comp_func = lambda a, b: a if a>b else b\n\n 偶数昇順、奇数昇順\n comp_func = lambda a, b: a if \\\n a % 2 == 0 and b % 2 == 1 else \\\n (b if b%2==0 and a%2==1 else (a if a<b else b))\n \"\"\"\n\n num = len(values)\n for i in range(0, num):\n tmp_value = values[i]\n tmp_index = i\n for j in range(i, num):\n if tmp_value != comp_func(values[j], tmp_value):\n tmp_index = j\n tmp_value = comp_func(values[j], tmp_value)\n values[tmp_index] = values[i]\n values[i] = tmp_value\n \"\"\"\n values.remove(tmp_value)\n values.insert(0, tmp_value)\n new_list.insert(0, tmp_value)\n \"\"\"\n print(values)\n\n return", "def sort(seq):\r\n if len(seq) <= 1:\r\n return seq\r\n else:\r\n pivot = seq[0]\r\n left, right = [], []\r\n for x in seq[1:]:\r\n if x < pivot:\r\n left.append(x)\r\n else:\r\n right.append(x)\r\n return sort(left) + [pivot] + sort(right)", "def _sort(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return row\n\n if 'key' not in self._state:\n self._state['key'] = self._replace_fields(self._args.key)\n\n r = list(map(self._convert, row))\n self._sorting_insert(self._result, r, key=lambda r: eval(self._state['key']))", "def sort(seq):\r\n for i in range(0, len(seq)):\r\n iMin = i\r\n for j in range(i+1, len(seq)):\r\n if seq[iMin] > seq[j]:\r\n iMin = j\r\n if i != iMin:\r\n seq[i], seq[iMin] = seq[iMin], seq[i]\r\n\r\n return seq", "def sort(*, list : Union[List[Any], 
ConduitVariable], reverse : bool = False) -> None:\n list.sort(key = None, reverse = reverse)", "def sort_col(col):\n return (col[0], sorted(col[1], key=lambda pair: pair[0]))", "def quick_sort(items):\n if not items:\n return []\n\n pivots = [i for i in items if i == items[0]]\n lesser = quick_sort([i for i in items if i < items[0]])\n greater = quick_sort([i for i in items if i > items[0]])\n\n return lesser + pivots + greater", "def sort_by_rgb(colors_tuple):\n sorted_tuple = sorted(colors_tuple, key=lambda x:x[1])\n return sorted_tuple", "def sort_list():\n fun_list = basic_list_exception.make_list()\n fun_list.sort()\n return fun_list", "def pyargsort(seq,cmp=None,key=lambda x:x):\n return sorted(list(range(len(seq))),key=lambda x:key(seq.__getitem__(x)),cmp=None)", "def selection_sort(mylist):\n for j in range(len(mylist)):\n current_min = j\n for i in range(j+1, len(mylist)):\n if mylist[i] < mylist[current_min]:\n current_min = i\n\n if current_min != j:\n _swap(mylist, current_min, j)", "def sort_unit_lst(self, attrname, lst2sort):\n comp = []\n for unit in lst2sort:\n importance = self._importance_rank(unit, attrname)\n comp.append((unit, importance))\n comp = sorted(comp, key= lambda x: x[1], reverse=True)\n\n return [x[0] for x in comp]", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def gnomesort(self):\n # nothing to do if we're empty or singleton\n if len(self) < 2:\n return\n # start with second element, and always compare to the element before\n current = self.first.next\n while current is not None:\n # thus current must have a .prev\n # If this element is unsorted with the element before it, then\n if current.prev and current.value < current.prev.value:\n # swap this element with the element before it\n # using insert_after and pop_before is an easy way to handle first/last identities\n self.insert_after(current, self.pop_before(current))\n # and then check the new previous-element.\n else:\n # advance to next node (or None if this is the last node in the list, in which case we terminate)\n current = current.next", "def sort_mixed(iterable):\n return sorted(iterable, key=lambda x: split_string_at_numbers(x))", "def python_sort(a_list):\n \n start_time = time.time()\n\n a_list.sort()\n\n end_time = time.time()\n\n run_time = end_time - start_time\n\n return (run_time, a_list)", "def selection_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Find minimum item in unsorted items\n # TODO: Swap it with first unsorted item\n for x in range(len(items)):\n smallest_index = x\n if x!=len(items)-1:\n for y in range(x+1, len(items)):\n if items[y] < items[smallest_index]:\n smallest_index = y\n temp = items[x]\n items[x] = items[smallest_index]\n items[smallest_index] = temp", "def insertionsort(A:list) -> \"void\":\n\tfor j in range(1, len(A)):\n\n\t\tkey = A[j]\n\t\ti = j - 1\n\n\t\twhile i >= 0 and A[i] > key:\n\t\t\tA[i+1] = A[i]\n\t\t\ti = i - 1\n\n\t\tA[i+1] = key", "def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))", "def sortByValue(d):\r\n items=d.items()\r\n backitems=[ [v[1],v[0]] for v in items]\r\n backitems.sort(); backitems.reverse()\r\n return [ backitems[i][1] for i in range(0,len(backitems))]" ]
[ "0.85222363", "0.7436253", "0.7205574", "0.7185881", "0.7109334", "0.70277673", "0.6577772", "0.6568136", "0.6405855", "0.6328426", "0.6323056", "0.63180023", "0.6280642", "0.6245652", "0.6225239", "0.6207625", "0.61590034", "0.6151864", "0.6146145", "0.61375666", "0.60629576", "0.6055418", "0.605437", "0.6047977", "0.6032295", "0.60214335", "0.5955259", "0.5945143", "0.59451157", "0.59447336", "0.5928909", "0.5925734", "0.5916407", "0.5912174", "0.59120846", "0.5891043", "0.58852756", "0.5884815", "0.5881697", "0.58629", "0.58103925", "0.5805352", "0.57765305", "0.5770803", "0.5760286", "0.57580864", "0.57553667", "0.5755164", "0.5741765", "0.57354385", "0.5716082", "0.5709255", "0.57069707", "0.5701357", "0.5701102", "0.568665", "0.56766", "0.5667132", "0.565317", "0.5650549", "0.56502396", "0.5648614", "0.56468415", "0.5637617", "0.56321067", "0.56306493", "0.5626473", "0.56184995", "0.5611928", "0.5611387", "0.5608191", "0.5605668", "0.5596695", "0.5579998", "0.5578129", "0.55690813", "0.55662006", "0.55550694", "0.55521864", "0.5550785", "0.55460984", "0.55442375", "0.5544176", "0.5526522", "0.55137426", "0.55114645", "0.55027616", "0.5487094", "0.5480435", "0.547401", "0.54715586", "0.5467496", "0.54668635", "0.5461211", "0.5459713", "0.54545635", "0.5454494", "0.5454006", "0.5450053", "0.54303354" ]
0.6534301
8
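The retrieved document above inserts each tuple into a running sorted list; in practice the same ordering (by first element, and stable) comes straight from the built-in sorted with a key function. A minimal sketch, with an invented input list and an illustrative function name:

def sort_tuples_by_first(tuple_list):
    # sorted() is stable, so tuples that tie on the first value keep their input order
    return sorted(tuple_list, key=lambda t: t[0])

pairs = [(3, 'c'), (1, 'a'), (2, 'b'), (1, 'z')]  # hypothetical input
print(sort_tuples_by_first(pairs))  # [(1, 'a'), (1, 'z'), (2, 'b'), (3, 'c')]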
Check if uuid_to_test is a valid UUID.
def is_valid_uuid(uuid_to_test, version=4):
    try:
        uuid_obj = UUID(uuid_to_test, version=version)
    except ValueError:
        return False
    return str(uuid_obj) == uuid_to_test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_uuid(uuid_to_test, version=4):\n\ttry:\n\t\tuuid_obj = UUID(uuid_to_test, version=version)\n\t\treturn True\n\texcept:\n\t\treturn False", "def check_uuid(uuid):\n try:\n converted = UUID(uuid, version=4)\n except ValueError:\n return False\n\n return str(converted) == uuid", "def validate_uuid(self, uuid):\n match = re.match(\n r'([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)-([a-z0-9]+)',\n uuid\n )\n if match:\n return True\n\n return False", "def validate_uuid(uuid_string):\n try:\n UUID(uuid_string, version=4)\n return True\n except:\n return False", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def validate_uuid(data):\n\n if not uuidutils.is_uuid_like(data):\n raise exceptions.DiagnoseException(\n \"'%s' is not a valid UUID\" % data)", "def check_int_uuid(uuid):\n try:\n converted = UUID(int=uuid, version=4)\n except ValueError:\n return False\n\n return converted.int == uuid", "def is_uuid_like(val):\n try:\n return str(uuid.UUID(val)) == val\n except (TypeError, ValueError, AttributeError):\n return False", "def is_uuid_like(val):\n try:\n return str(uuid.UUID(val)) == val\n except (TypeError, ValueError, AttributeError):\n return False", "def is_valid_uuid_string(uuid_str):\n return isinstance(uuid_str, str) and VALID_UUID_REGEX.match(uuid_str)", "def _verify_uuid(given_uuid):\n\n\t\tif isinstance(given_uuid, str) or isinstance(given_uuid, unicode):\n\t\t\t# Verify the given string is well-formed\n\t\t\tuuid.UUID(given_uuid)\n\t\t\treturn given_uuid\n\n\t\tif isinstance(given_uuid, uuid.UUID):\n\t\t\treturn given_uuid.__str__()\n\n\t\traise ValueError(\"Given object is neither a string nor a UUID object.\")", "def is_uuid(value: str | UUID) -> bool:\n if isinstance(value, str):\n return _uuid.match(str(value)) and True or False\n\n return isinstance(value, UUID)", "def is_uuid4(uuid_string):\n try:\n UUID(uuid_string, version=4)\n except (ValueError, TypeError):\n # If it's a value error, then the string\n # is not a valid hex code for a UUID.\n # None will raise TypeError.\n return False\n return True", "def is_uuid(my_object):\n try:\n my_uuid = uuid.UUID(my_object, version=4)\n except ValueError:\n return False\n return str(my_uuid) == my_object", "def is_uuid_v4(uuid_or_name):\n # Based on https://gist.github.com/ShawnMilo/7777304\n try:\n uuid = UUID(uuid_or_name, version=4)\n except Exception:\n return False\n\n return uuid.hex == uuid_or_name.replace(\"-\", \"\")", "def is_uuid(self) -> bool:\n ua = self.user_agent.strip('({})')\n if len(ua) >= 2 and ua[1] == ':':\n ua = self.user_agent[2:]\n\n return uuid_like_name(ua)", "def test_uuid_assignment(self):\n game = Game()\n game.uuid = '01234567-abcd-abcd-abcd-0123456789ab'\n assert isinstance(game.uuid, uuid.UUID), 'Value should be a UUID.'", "def test_get_shortuuid_uuid(self):\n id = get_shortuuid()\n self.assertTrue(len(id) == 22)", "def test_api_object_uuid(self, api_object):\n assert isinstance(api_object.uuid_, uuid.UUID)", "def test_uuid_adapter(self):\n with self.assertRaises(TypeError):\n adapter = UUIDAdapter('01234567-0123-0123-0123-0123456789ab')", "def test_bad_uuid_uppercase():\n bad_uiid_upper = \"7CFB2470-B600-4EB3-A2CD-C1439E45B91G\"\n m = CannedRe.UUID.match(bad_uiid_upper)\n assert m is None, \"Canned RegEx uiid test succeeded for %s while it should not\" % bad_uiid_upper", "def test_invlalid_uuid_load():\n schema = UUIDSchema()\n result = schema.load({\n \"uuid_str\": 
INVALID_UUID_STR,\n \"uuid_uuid\": UUID(UUID_STR),\n })\n\n assert_that(result.errors[\"uuid_str\"], contains('Not a valid UUID.'))", "def validate_uuid(value: Any, none_allowed: bool, display_name: str) -> None:\n if none_allowed and value is None:\n return\n\n if not isinstance(value, UUID) or value.version != 4:\n raise TypeError(f\"{display_name} must be a UUID version 4\")", "def test_uuid():\n for _ in range(1000):\n uuid = uuid_generator()\n assert len(uuid) == 36\n assert uuid.count('-') == 4", "def test_bad_uuid_lowercase():\n bad_uiid_lower = \"7cfb2470-b600-4eb3-a2cd-c1439e45b91g\"\n m = CannedRe.UUID.match(bad_uiid_lower)\n assert m is None, \"Canned RegEx uiid test succeeded for %s while it should not\" % bad_uiid_lower", "def valid_uuid(self, rule_to_generate_uuid, tag_index):\r\n self.required_fields[UUID].attributefound()\r\n self.required_fields_index[self.required_fields[UUID].position].increment_count()\r\n\r\n rule_uuid = {UUID: str(baseconv.base62.encode(uuid.uuid4().int))}\r\n if self.valid_metadata_index(rule_to_generate_uuid, tag_index):\r\n if list(rule_to_generate_uuid[METADATA][tag_index].keys())[0] == UUID:\r\n if self.validate_uuid(list(rule_to_generate_uuid[METADATA][tag_index].values())[0]):\r\n self.required_fields[UUID].attributevalid()\r\n else:\r\n self.required_fields[UUID].attributeinvalid()\r\n else:\r\n rule_to_generate_uuid[METADATA].insert(tag_index, rule_uuid)\r\n self.required_fields[UUID].attributevalid()\r\n else:\r\n rule_to_generate_uuid[METADATA].append(rule_uuid)\r\n self.required_fields[UUID].attributevalid()\r\n\r\n return self.required_fields[UUID].valid", "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def test_uuid_created():\n assert type(sc.current.id) == str", "def test_good_uuid_uppercase():\n good_uuid_uppercase = \"7CFB2470-B600-4EB3-A2CD-C1439E45B91F\"\n m = CannedRe.UUID.match(good_uuid_uppercase)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx UUID test failed for %s\" % good_uuid_uppercase\n assert m.string == good_uuid_uppercase", "def test_random_uuid(self):\n movie = Movie.objects.get(title='The Two Towers')\n assert isinstance(movie.id, uuid.UUID), ( 'Expected UUID, got %s.' %\n movie.id.__class__.__name__ )\n tt_uuid = str(movie.id)\n self.assertEqual(tt_uuid[14], '4')\n assert tt_uuid[19] in ('8', '9', 'a', 'b'), 'Invalid random UUID.'", "def test_hardware_uuid_value(self):\n \n hardware_uuid = get_uuids()[1]\n \n # Check to make sure it contains \"-\"\n # Exact matching is not done on purpose - UUIDs should be kept private!\n self.assertIn(\"-\", hardware_uuid)", "def from_uuid(self):\n reason = \"[!] 
UUID's are in the format 00000000-0000-0000-0000-000000000000\"\n ts_type = self.ts_types['uu']\n try:\n uuid_lower = self.uu.lower()\n UUID_REGEX = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')\n if not bool(UUID_REGEX.match(uuid_lower)):\n self.in_uuid = indiv_output = combined_output = False\n pass\n else:\n u = uuid.UUID(uuid_lower)\n if u.version == 1:\n unix_ts = int((u.time / 10000) - 12219292800000)\n self.in_uuid = dt.utcfromtimestamp(float(unix_ts) /1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')\n else:\n pass\n indiv_output = str(\"{} {}\".format(ts_type, self.in_uuid))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_uuid, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_uuid = indiv_output = combined_output = False\n return self.in_uuid, indiv_output, combined_output, reason", "def test_hardware_uuid_type(self):\n \n boot_session_uuid = get_uuids()[2]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(boot_session_uuid), str)", "def test_hardware_uuid_type(self):\n \n hardware_uuid = get_uuids()[1]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(hardware_uuid), str)", "def test_kernel_uuid_value(self):\n \n kernel_uuid = get_uuids()[0]\n \n # Check to make sure it contains \"-\"\n # Exact matching is not done on purpose - UUIDs should be kept private!\n self.assertIn(\"-\", kernel_uuid)", "def test_uuid_retreival_uuid(self):\n movie = Movie.objects.get(id=uuid.UUID(\n '01234567-0123-0123-0123-0123456789ab',\n ))\n self.assertEqual(movie.title, 'The Return of the King')", "def test_kernel_uuid_type(self):\n \n kernel_uuid = get_uuids()[0]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(kernel_uuid), str)", "def test_good_uuid_lowercase():\n good_uuid_lowercase = \"7cfb2470-b600-4eb3-a2cd-c1439e45b91f\"\n m = CannedRe.UUID.match(good_uuid_lowercase)\n # print getmembers(m)\n assert m is not None, \"Canned RegEx uuid test failed for %s\" % good_uuid_lowercase\n assert m.string == good_uuid_lowercase", "def is_order_id_valid(self):\n \n if not self.order_id:\n self.error_message = jsonify({'status':'error', 'message': 'orderId parameter missing'})\n return False\n if not re.match('^[a-f0-9]{32}$', self.order_id):\n self.error_message = jsonify({'status': 'error', 'message': 'orderId must be set to (hex) UUID'})\n return False\n return True", "def skip_or_run_uuid_test(func):\n\n return skip_or_run_test_tarantool(func, '2.4.1',\n 'does not support UUID type')", "def is_guid(value):\n try:\n _guids.Guid(value)\n except (_guids.Guid.BadGuidError, _guids.Guid.MissingGuidError):\n return False\n return True", "def validateUuid(sValue, aoNilValues = tuple([None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n return (sValue, None if fAllowNull else 'Mandatory.');\n\n try:\n sValue = str(uuid.UUID(sValue));\n except:\n return (sValue, 'Invalid UUID value.');\n return (sValue, None);", "def test_uidvalidity(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDVALIDITY 12345] UIDs valid')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'UIDVALIDITY': 12345})", "def test_register_uuid_only(self):\n username = \"testuuid\"\n data = {'username': username, 'password': \"123test\", 'email': 'test@me.com',\n 'newsletter': \"True\", 'research': \"True\", 'uuid': 
'49b44243-a240-49bb-8076-1dee1782e1fa'}\n\n response = self.requestRegistration(data, viewname='v1_api_register')\n\n self.assertTrue(response.status_code == status.HTTP_201_CREATED)\n self.assertTrue('client_id' in response.data)\n\n users = User.objects.filter(username=username)\n self.assertTrue(len(users) == 1)\n user = users[0]\n profile = user.user_profile\n phone = Device.objects.get(user=user)\n\n self.assertTrue(phone.uuid == self.uuid)", "def checkuuidsyntax(uuidtxt):\n score = 0\n if uuidtxt != None:\n if len(uuidtxt) < 10:\n score = 0\n elif uuidtxt.find(\"{\") > -1 or uuidtxt.find(\"}\") > -1 or uuidtxt.lower() != uuidtxt:\n score = 1\n else:\n score = 2\n return score", "def validate_device_id(device_id):\n regex = re.compile(r'^[0-9a-fA-F]{2,6}$')\n if regex.match(device_id) == None:\n raise ValidationError('Device ID must be 2-6 characters and must be hexadecimal (0-9,a-f,A-F).')", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def test_boot_session_uuid_value(self):\n \n boot_session_uuid = get_uuids()[2]\n \n # Check to make sure it contains \"-\"\n # Exact matching is not done on purpose - UUIDs should be kept private!\n self.assertIn(\"-\", boot_session_uuid)", "def test_id_creation(self):\n user_1_id = eval(\"uuid.UUID('\" + self.user_1.id + \"')\")\n self.assertIsInstance(user_1_id, uuid.UUID)", "def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)", "def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def test_002_name2uuid(self):\n our_uuid = self._get_uuid()\n\n given_name = self.clients.astakos.get_usernames([our_uuid])\n self.info(\"uuids2usernames returned %s\", given_name)\n self.assertIn(our_uuid, given_name)\n\n given_uuid = self.clients.astakos.get_uuids([given_name[our_uuid]])\n self.info(\"usernames2uuids returned %s\", given_uuid)\n self.assertIn(given_name[our_uuid], given_uuid)\n\n self.assertEqual(given_uuid[given_name[our_uuid]], our_uuid)", "def _validateUuid(dErrors, sName, sValue):\n (sValue, sError) = ModelDataBase.validateUuid(sValue, fAllowNull = True);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def test_specified_uuid(self):\n movie = Movie.objects.get(title='The Return of the King')\n self.assertEqual(str(movie.id), '01234567-0123-0123-0123-0123456789ab')", "def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']", "def test_bad_uuid_filesystem(self):\n command_line = [\"filesystem\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_uuids(server):\n\n assert len(server.uuids()) == 1\n assert len(server.uuids(10)) == 10\n assert isinstance(server.uuids()[0], six.string_types)", "def _validate_instrumentation_key(self) -> None:\n if not self.instrumentation_key:\n raise ValueError(\"Instrumentation key cannot be none or empty.\")\n match = uuid_regex_pattern.match(self.instrumentation_key)\n if not match:\n raise ValueError(\n \"Invalid instrumentation key. 
It should be a valid UUID.\")", "def uuid_regex_doctests():\n pass", "def test_uuid_retreival_str(self):\n movie = Movie.objects.get(id='01234567-0123-0123-0123-0123456789ab')\n self.assertEqual(movie.title, 'The Return of the King')", "def test_bad_uuid_pool(self):\n command_line = [\"pool\", \"debug\", \"get-object-path\", \"--uuid=not\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_is_valid_user_id_invalid(self):\n ids = (\n (\"SGVsbG8gd29ybGQ\", \"non-digit ASCII\"),\n (\"0J_RgNC40LLQtdGCINC80LjRgA\", \"cyrillic text\"),\n (\"4pO14p6L4p6C4pG34p264pGl8J-EiOKSj-KCieKBsA\", \"Unicode digits\"),\n (\"4oaA4oaB4oWh4oWi4Lyz4Lyq4Lyr4LG9\", \"Unicode numerals\"),\n (\"8J2fjvCdn5nwnZ-k8J2fr_Cdn7rgravvvJngr6c\", \"Unicode decimals\"),\n (\"{hello}[world]&(bye!)\", \"ASCII invalid Base64\"),\n (\"Þíß-ï§-ňøẗ-våłìÐ\", \"Unicode invalid Base64\"),\n )\n\n for user_id, msg in ids:\n with self.subTest(msg=msg):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertFalse(result)", "def test_service_uuid_string(self):\n return\n with self._flask_app.app_context():\n new_service = TeraService()\n new_service.service_uuid = 'Definitely longer than a 36 characters string'\n new_service.service_name = 'Name'\n new_service.service_key = 'key'\n new_service.service_hostname = 'Hostname'\n new_service.service_port = 2\n new_service.service_endpoint = \"Endpoint\"\n new_service.service_clientendpoint = 'Clientendpoint'\n new_service.service_enabled = True\n new_service.service_system = True\n new_service.service_editable_config = True\n self.db.session.add(new_service)\n self.db.session.commit()\n self.assertRaises(exc.IntegrityError, self.db.session.commit)", "def exists (self, uuid):\n return self.read (uuid) is not None", "def test_bad_values_for_validate_guid(bad_value):\n with pytest.raises(ValidationError):\n bcvalidators.validate_guid(bad_value)", "def test_validate_source_uuid_error(self):\n tag_value = {\"tag_value\": \"key_one\", \"value\": 0.2}\n self.basic_model[\"rates\"][0][\"tag_rates\"] = format_tag_rate(tag_values=[tag_value])\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.basic_model, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n serializer.validate_source_uuids([uuid4()])", "def is_mbid(mbid):\n try:\n mbid = uuid.UUID(mbid)\n good = True\n except ValueError as e:\n good = False\n except AttributeError:\n good = False\n\n return good", "def _validate_app_id(self, app_id):\n try:\n uuid_hex = UUID(app_id)\n regex = APP_SECRET_REGEX_LIST[0]\n m = regex.search(app_id)\n if not m:\n return False\n elif uuid_hex or m:\n return True\n except ValueError:\n return False", "def test_dict_keys_uuid(self):\n assert (\n orjson.dumps(\n {uuid.UUID(\"7202d115-7ff3-4c81-a7c1-2a1f067b1ece\"): True},\n option=orjson.OPT_NON_STR_KEYS,\n )\n == b'{\"7202d115-7ff3-4c81-a7c1-2a1f067b1ece\":true}'\n )", "def test_uuid_retreival(self):\n game = Game.objects.get(uuid='abcdabcd-abcd-abcd-abcd-abcdabcdabcd')\n self.assertEqual(game.title, 'Lego Lord of the Rings')", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def test_nonIntegerUIDVALIDITY(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDVALIDITY foo] UIDs valid')\n self.failureResultOf(d, imap4.IllegalServerResponse)", "def test_get_shortuuid_name(self):\n id1 = get_shortuuid(name='mytesturl.com')\n id2 = get_shortuuid(name='mytesturl.com')\n self.assertEqual(id1, 
id2)", "def check_token(token):\n valid = re.compile(r\"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-\"\n r\"[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$\")\n\n return valid.match(token)", "def _validateUuidNN(dErrors, sName, sValue):\n (sValue, sError) = ModelDataBase.validateUuid(sValue, fAllowNull = False);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def test_verification_with_invalid_token(self) -> None:\n\n uuids: typing.List[str] = []\n for i in range(2, 5):\n uuids.append(str(uuid.uuid5(\n uuid.uuid1(1),\n f'abcd123456{i}'\n )))\n\n for temp_uuid in uuids:\n response: Response = self.client.get(f'/api/authors/verify/{temp_uuid}/')\n data = u.get_json(response)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data, {\n 'detail': 'Not found.'\n })", "def to_uuid(string):\n if sys.version_info[0] == 2:\n string = string.encode('utf-8')\n \n # This the seed Ansible has chosen for their UUID's\n return str(uuid.uuid5(uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E'), string))", "def safe_uuid() -> str:\n\n taken = os.listdir(DATA_DIR)\n while True:\n new_uuid = gen_uuid()\n if new_uuid in taken:\n logger.warning('uuid collision %s', new_uuid)\n else:\n logger.info('uuid=%s', new_uuid)\n return new_uuid", "def is_valid_hex(hex_code: str) -> bool:\n\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hex_code)\n\n if match:\n return True\n else:\n return False", "def test_uuid_null_blank_error(self):\n with self.assertRaises(AttributeError):\n class Thing(models.Model):\n uuid = models.UUIDField(blank=True)", "def _checkUID(self, uid):\n return uid in self._reservedUID", "def test_uuid_none(self):\n with self.assertRaises(ValueError):\n Game.objects.create(\n title='Lego Batman',\n )", "def test_get_user_by_uuiduser_uuid_get(self):\n pass", "def test_is_valid_user_id_valid(self):\n ids = (\n \"NDcyMjY1OTQzMDYyNDEzMzMy\",\n \"NDc1MDczNjI5Mzk5NTQ3OTA0\",\n \"NDY3MjIzMjMwNjUwNzc3NjQx\",\n )\n\n for user_id in ids:\n with self.subTest(user_id=user_id):\n result = TokenRemover.is_valid_user_id(user_id)\n self.assertTrue(result)", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def test_list_of_uuids():\n assert is_list_of_uuids(None) is None\n assert is_list_of_uuids('a')\n assert is_list_of_uuids([uuid.uuid4()]) is None\n assert is_list_of_uuids([1])", "def is_hex(s): \n # if it can be converted to a base 16 int then it is hex\n try:\n int(s, 16)\n return True\n \n except ValueError:\n # it could not be converted therefore is not hex\n return False\n # end try", "def test_uuid_default(self):\r\n default = uuid.uuid4()\r\n prop = UUID(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def _check_request_id(\n self,\n message: W24TechreadMessage\n ) -> None:\n self.assertEqual(type(message.request_id), UUID)", "def isValidUnitSId(*args):\n return _libsbml.SyntaxChecker_isValidUnitSId(*args)", "def _generate_uuid_str_if_none(given_uuid):\n\t\treturn given_uuid or uuid.uuid4().__str__()", "def test_uuid_editable(self):\n uuid_field = Game._meta.get_field_by_name('uuid')[0]\n self.assertEqual(uuid_field.editable, True)", "def getuuid(value, table, table_attrib, error_tail):\n if value is None:\n return value\n\n elif modelfor(value, table):\n value = getattr(value, table_attrib, None)\n if value is None:\n raise ValueError(\"null id provided for %s\" % error_tail)\n return value\n\n # if a string was provided then we should\n # 
try to convert it into a uuid first to\n # be sure it's valid\n elif isinstance(value, STRING_TYPES):\n UUID(value)\n return value\n\n elif isinstance(value, UUID):\n return str(value)\n\n else:\n raise ValueError(\"failed to determine %s\" % error_tail)", "def is_valid_mac(address):\n m = \"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\"\n if isinstance(address, six.string_types) and re.match(m, address.lower()):\n return True\n return False", "def test_request_uuid_type(self):\n request = StudySubjectsRequest(self.project_name, self.environment,\n subject_key_type=\"SubjectUUID\")\n self.assertTrue(\"subjectKeyType=SubjectUUID\" in request.url_path())", "def verify(timestamp):\n if not isinstance(timestamp, str):\n raise TypeError('\"{}\" is not str type'.format(type(timestamp)))\n elif match('^[0-9]{1,2}(:[0-9]{1,2}){1,2}(\\.[0-9]{1,9})?$', timestamp):\n return True\n return False", "def is_valid_node_id(val):\n if not val:\n return False\n if not isinstance(val, bytes) and not isinstance(val, bytearray):\n return False\n\n length = len(val)\n if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \\\n length != SHA3_BIN_LEN:\n return False\n\n return True", "def test_serial_number(self):\n serial_number = self.device.serial_number\n self.assertTrue(serial_number)\n self.assertIsInstance(serial_number, str)", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False" ]
[ "0.8684954", "0.8333081", "0.8155957", "0.80367714", "0.8028158", "0.79428333", "0.77374667", "0.7734152", "0.7734152", "0.76239145", "0.7593849", "0.7555792", "0.7149705", "0.71090436", "0.69526494", "0.6922901", "0.66879606", "0.66867393", "0.66619414", "0.6655981", "0.66405064", "0.65931666", "0.6540948", "0.64809656", "0.64784664", "0.6473613", "0.64596105", "0.64430845", "0.6429046", "0.6383538", "0.6348884", "0.63268816", "0.62558854", "0.6239793", "0.6215066", "0.61318433", "0.60673547", "0.60598767", "0.60507077", "0.60489213", "0.6038718", "0.5971133", "0.59617937", "0.59480226", "0.5941713", "0.5903754", "0.5902699", "0.58919954", "0.58821666", "0.58741987", "0.58707553", "0.5854223", "0.5852458", "0.5829491", "0.5810598", "0.5792461", "0.57424456", "0.57396466", "0.5717547", "0.56964076", "0.56761336", "0.56484896", "0.5644937", "0.56289864", "0.5620875", "0.5616773", "0.56103873", "0.5578151", "0.556987", "0.5557931", "0.55469406", "0.55252975", "0.5499121", "0.5494942", "0.54689777", "0.542003", "0.5408083", "0.5401291", "0.53961223", "0.5387658", "0.5374563", "0.53576523", "0.5330449", "0.5313848", "0.53052855", "0.53050363", "0.52987653", "0.5294862", "0.5273521", "0.52697116", "0.52684766", "0.5268441", "0.526044", "0.5257371", "0.52478105", "0.52274364", "0.5214743", "0.52002615", "0.5191371", "0.51911944" ]
0.8562131
1
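The retrieved document and most of its negatives share one pattern: parse the candidate with uuid.UUID and compare the canonical string form back to the input, which also rejects non-canonical spellings such as uppercase hex or braces. A short usage sketch of that pattern; the helper name and test values are invented:

from uuid import UUID

def check(candidate, version=4):
    # round-trip check: parse, then compare the canonical form back to the input
    try:
        return str(UUID(candidate, version=version)) == candidate
    except ValueError:
        return False

print(check('c9bf9e57-1685-4c89-bafb-ff5af830be8a'))  # True
print(check('C9BF9E57-1685-4C89-BAFB-FF5AF830BE8A'))  # False: canonical form is lowercase
print(check('not-a-uuid'))                            # False: UUID() raises ValueError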
Return parsed file info. file_name should be a string with the full path to the file.
def file_info(file_name, file_pattern):
    match = re.compile(file_pattern).match(file_name)
    if match:
        basepath = match.group('basepath')
        sensor = match.group('sensor')
        ax = match.group('ax')
        freq = match.group('freq')
        date = match.group('date')
        return basepath, sensor, ax, freq, date
    else:
        return None  # there is no file extension to file_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _extract_file_info(directory, root_path, name):\n file_path = join(directory, name)\n rel_path = relpath(file_path, root_path)\n return {\n \"name\": name,\n \"path\": file_path,\n \"dir_name\": dirname(file_path),\n \"is_file\": isfile(file_path),\n \"is_dir\": isdir(file_path),\n \"level\": len(rel_path.split('/')) - 1\n }", "def parseFileInfo(self, file):\n # FileMode, FilesNumber, User, Group, Size, Date, Filename\n item = [f for f in file.split(' ') if f != '']\n \n ftype, size, date, filename = (item[0], item[4], ' '.join(item[5:8]), ' '.join(item[8:]))\n # print(ftype, size, date, filename)\n return (ftype, size, date, filename)", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def parse_file(self, file_name: str):\n if not os.path.exists(file_name):\n log.error('File {} does not exist'.format(file_name))\n return None\n try:\n with open(file_name) as file:\n file_content = file.readlines()\n except Exception as ex:\n log.error('Failed to read file {}: {}'.format(file_name, str(ex)))\n return None\n return self.parse_from_string(''.join(file_content))", "def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx", "def get_file_info(fname) -> Tuple[str, bool]:\n fname = fname.lower()\n is_compressed = False\n if fname.endswith((\".tgz\", \".tar.gz\")):\n is_compressed = True\n fname = re.sub(r\"\\.(tgz|tar\\.gz)$\", \"\", fname)\n elif fname.endswith(\".gz\"):\n is_compressed = True\n fname = fname[:-3]\n elif fname.endswith(\".zip\"):\n is_compressed = True\n fname = fname[:-4]\n split = os.path.splitext(fname)\n return split[1], is_compressed", "def parse_file_name(file_name):\n\n elements = file_name.split(\"_\")\n if file_name.find(\"_VI_\") > 0:\n client = elements[0]\n capture_range = \"R1\"\n condition = elements[2]\n polarization = \"VIS\"\n shot = elements[4]\n modality = \"VIS\"\n else:\n client = elements[0]\n capture_range = elements[1]\n condition = elements[2]\n polarization = elements[3]\n shot = elements[4]\n modality = \"THERMAL\"\n \n return client, capture_range, condition, polarization, shot, modality", "def get_file_info(filename):\n info = {'buildroot_id': 0}\n info['filename'] = os.path.basename(filename)\n fbytes = os.path.getsize(filename)\n info['filesize'] = int(fbytes)\n # Kojihub only supports checksum_type: md5 for now.\n info['checksum_type'] = 'md5'\n checksum = util.get_md5sum(filename)\n info['checksum'] = checksum\n info['arch'] = 'x86_64'\n if filename.endswith('.tar.gz') or filename.endswith('.tar.xz'):\n info['type'] = 'tarball'\n elif filename.endswith('.deb'):\n info['type'] = 'deb'\n elif filename.endswith('.dsc'):\n info['type'] = 'dsc'\n elif filename.endswith('.log'):\n info['type'] = 'log'\n else:\n raise RuntimeError('unknown extension for %s' % filename)\n info['extra'] = {\n 'typeinfo': {\n 'debian': {},\n },\n }\n return info", "def get_file_data(filename):", "def parse_def(filename):\n info = dict()\n encoding = open_guess_encoding(filename)\n # errors=\"surrogateescape\" is used to ignore unknown characters if the\n # encoding is incorrectly guessed. 
Shift-JIS seems to give many errors\n with open(filename, encoding=encoding, errors='surrogateescape') as fp:\n try:\n for line in fp:\n match = name_regex.match(line)\n if match:\n name = match.groups()[0]\n if name != \"\":\n info['name'] = name\n break\n\n except UnicodeDecodeError:\n print('unicode error: ', filename)\n\n return info", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, file_format", "def read_file(self, filename):\n data = None\n with open(filename, \"rb\") as f:\n data = f.read()\n filetype = filename.split('.')[-1] if '.' in filename.split('/')[-1] else None\n return data, filetype", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def get_metadata_from_filename(file_name: str) -> namedtuple:\n if os.path.isabs(f):\n file_name = os.path.basename(file_name)\n original_image_name = file_name.split('-')[0]\n x_pos = int(file_name.split('.')[-2].split('+')[-2:][0])\n Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos'])\n return Metadata(original_image_name, x_pos)", "def get_metadata_from_filename(f,file_name_field_order,file_name_delimiter,\\\n default_text='not_specified',verbose=False): \n filename_components = {}\n for i,field in enumerate(f.split(file_name_delimiter)):\n filename_components[i]=field\n #if verbose:\n # print \"Filename components:\",filename_components\n filename_metadata = {}\n try:\n for field in file_name_field_order.keys():\n filename_metadata[field] =\\\n filename_components.get(file_name_field_order.get(field,default_text),default_text)\n\n #if verbose:\n # print \"filename_metadata:\",filename_metadata\n except IndexError, e:\n print \"Could not parse filename %s using delimiter: %s. 
Skipping...\" %(f,file_name_delimiter)\n return None\n\n return filename_metadata", "def file_info(self, f):\n ld8 = self.ld8_extract(f) # get luna_date\n sid = self.sesid(ld8) # make luna_visitnum\n age = self.age_lookup.get(sid)\n return (sid, age)", "def read_atts(self, file_name):\n\n match = re.match(self.regex_pattern, file_name)\n\n return match.groupdict()", "def read_file(file_name):\n information = []\n with open(f\"{PATH}\\\\input\\\\{file_name}.csv\") as file:\n reader = csv.DictReader(file)\n for _ in reader:\n information.append(_)\n return information", "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed", "def extract_info(self, filename, results):\n\n logging.info(\"File to extract info: '{0}'\".format(filename))\n\n info = {\"names\": [],\n \"suites\": [],\n \"statuses\": [],\n \"times\": [],\n \"files\": [],\n \"functions\": [],\n \"descriptions\": []}\n\n with open(filename) as file_handler:\n\n root = etree.parse(file_handler).getroot()\n\n if root.tag == \"report\":\n\n for testsuite in root.getchildren():\n\n self.extract_testsuite(testsuite, info)\n\n elif root.tag == \"testsuite\":\n\n self.extract_testsuite(root, info)\n\n elif root.tag == \"test_case_result\":\n\n logging.warning(\"XML files using 'test_case_result' as their\\\n root are not supported yet!\")\n\n # adds the extracted information to the main dict\n if reduce(lambda x, y: None if not x else y, info.values()):\n\n file_results = dict(zip(info[\"names\"],\n zip(info[\"suites\"],\n info[\"files\"],\n info[\"functions\"],\n info[\"descriptions\"],\n info[\"statuses\"],\n info[\"times\"])))\n\n 
results[os.path.abspath(filename)] = file_results", "def parse_filename(filename, filename_format=\"ALL\"):\n\n # parse filename\n basename = os.path.basename(filename)\n\n # disable parsing if filename_format is None\n if filename_format is None:\n return {\"filename\": filename}\n\n # try all filename formats for special value ALL\n if filename_format == \"ALL\":\n for parser in filename_format_parser.values():\n try:\n info = parser(basename)\n except ValueError:\n info = {}\n continue\n else:\n break\n elif filename_format in filename_format_parser:\n parser = filename_format_parser[filename_format]\n info = parser(basename)\n else:\n raise KeyError(\"unknown filename_format={}\".format(filename_format))\n\n\n # define nuclide tuple\n info[\"filename\"] = filename\n if (\"Z\" in info) and (\"N\" in info):\n info[\"nuclide\"] = (info[\"Z\"],info[\"N\"])\n\n return info", "def get_file(self, name):\n return self.files[name]", "def parse_filename(filename): # , time_fmt=TIME_INFILE_FMT):\n # Split the name up into its \"blocks\"\n parts = filename.split(\"_\")\n hive_str, rpi_str = parts[1:3]\n day_str = parts[3]\n method = parts[5]\n\n # Parse Hive and RPi number\n hive = int(hive_str[-1])\n rpi = int(rpi_str[-1])\n method = method.strip(\".csv\")\n\n # # Parse timestring into a datetime object\n # dt_naive = datetime.strptime(t_str, time_fmt)\n # dt_utc = pytz.utc.localize(dt_naive)\n\n return hive, rpi, method, day_str", "def file_info(label, filename):\n filekit = current_app.filekits.get(label)\n if filekit is None: \n abort(404)\n try:\n fkit = filekit(filename)\n except FileNotFound:\n abort(404)\n return jsonify(fkit.to_dict())", "def parse_filename(cls, filename):\n #from nose.tools import set_trace; set_trace()\n m = re.match(cls._pattern, os.path.basename(filename))\n basename = m.group(1)\n bandname = cls._bandmap.get(m.group(2), m.group(2))\n return basename, bandname", "def info(self):\n if self.file:\n parts = str(self.file).split('/')\n return {\n 'test-name': parts[-2],\n 'case-name': parts[-1].split('.')[0],\n }\n return {}", "def read_parsed_data(parsed_filename_path, parsed_topology_data_path):\n with open(parsed_filename_path, 'rb') as f:\n file_name = pk.load(f)\n with open(parsed_topology_data_path, 'rb') as f:\n topology_info = pk.load(f)\n return file_name, topology_info", "def _load_spec_filename_additional_info(spec_filename):\n import json\n\n try:\n additional_info_filename = _get_additional_info_filename(spec_filename)\n\n with open(additional_info_filename, \"r\") as stream:\n source_to_mtime = json.load(stream)\n return source_to_mtime\n except:\n log.exception(\"Unable to load source mtimes from: %s\", additional_info_filename)\n return {}", "def _extreact_qml_file_info(file):\n return {\n \"name\": file[\"name\"],\n \"path\": file[\"path\"],\n \"isFile\": file[\"is_file\"],\n \"isDir\": file[\"is_dir\"],\n \"level\": file[\"level\"]\n }", "def parse_media_info(filename):\n print_info('Extracting hash from {0}'.format(filename))\n media_info = MediaInfo()\n for media_info_type in MEDIA_INFO_REGEXS:\n #print_info('Parsing for {0}'.format(media_info_type))\n for regex in MEDIA_INFO_REGEXS[media_info_type]:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_data = m.group('MediaInfo').upper()\n print_info('Extracted {0}: {1}'.format(media_info_type, extracted_data))\n\n # Before we set, do any needed cleanup\n if media_info_type == 'resolution':\n if not extracted_data.endswith('p'):\n resolution = int(extracted_data)\n if resolution 
== 1280:\n extracted_data = '720'\n extracted_data = extracted_data + 'p'\n media_info.resolution = extracted_data\n if media_info_type == 'source':\n media_info.source = extracted_data.replace('-', '')\n elif media_info_type == 'audio_source':\n media_info.audio_source = extracted_data\n elif media_info_type == 'encoding':\n media_info.encoding = re.sub('X', 'H', extracted_data)\n elif media_info_type == 'color_bits':\n media_info.color_bits = extracted_data\n break\n \n \n return media_info", "def get_info(info_filename):\n with open(info_filename) as info_file:\n info_dict = csv.DictReader(info_file)\n info = {}\n for row in info_dict:\n info[row['path']] = datetime.datetime.strptime(row['start'],\n '%Y-%m-%d')\n return info", "def parse_file(file_name, out):\n try:\n with open(file_name) as f:\n parse_string(f.read(), out)\n except Exception as e:\n logging.error(\"Error when opening and parsing file %s: %s\" % (file_name, e))\n print(\"Error occurred when parsing file. See logs for more details.\",file=sys.stderr)", "def stat_file(self, path, info):\n return {}", "def parse_info_from_file(path):\n try:\n filename = os.path.split(path)[1]\n filename = os.path.splitext(filename)[0]\n age, gender, race, _ = filename.split('_')\n\n return int(age), dataset_dict['gender_id'][int(gender)], dataset_dict['race_id'][int(race)]\n except Exception as ex:\n return None, None, None", "def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry", "def parse(filename: str) -> str:\n with open(filename) as file:\n return file.readline().strip()", "def get_file_data(file_name):\r\n try:\r\n with open(file_name, 'rb') as input_file:\r\n data = input_file.read()\r\n return data\r\n except Exception as err:\r\n return str(err).encode()", "def get_info(f_path,f_name):\n\n d_string = False\n d_str_c = 0\n cmt_c = 0\n v_line_c = 0\n code_c = 0\n \n ext = get_ext(f_name)\n try:\n l_data = lang_data.loc[ext]\n print(\"File:\",f_name)\n print(\"Language:\",l_data[\"name\"])\n mlcmto,mlcmtc = get_mlcmts(l_data)\n cmt = get_cmt(l_data)\n \n except:\n raise ValueError(\"Extension not found in plang.csv\")\n f = open(f_path+f_name,\"r\")\n while True:\n line = f.readline()\n print(list(line))\n #print(\"\")\n if len(line)==0:\n break\n else:\n if d_string:\n d_str_c+=1\n if is_mlcmt(line,mlcmto,mlcmtc)[1]>=0:\n d_string = False\n\n else:\n if is_mlcmt(line,mlcmto,mlcmtc)[0]>=0:\n d_str_c+=1\n if is_mlcmt(line,mlcmto,mlcmtc)[1]==-1:\n d_string = True\n elif is_cmt(line,cmt):\n cmt_c+=1\n elif is_void(line):\n v_line_c+=1\n else:\n code_c+=1\n print(code_c)", "def parseFilename(self, filename):\r\n match = self.filename_regex.match(filename)\r\n if match is None:\r\n # TODO?: Raise exception?\r\n '''print \"Filename\", filename, \"unrecognized!\"'''\r\n return None\r\n lat = int(match.group(2))\r\n lon = int(match.group(4))\r\n if match.group(1) == \"S\":\r\n lat = -lat\r\n if match.group(3) == \"W\":\r\n lon = -lon\r\n return lat, lon", "def extract_filename(self, filename: str, lang: str) -> 'typing.Dict[str, str]':\n meta = {}\n meta['date'] = self._getNikolaTime(os.path.getctime(filename))\n w_title = os.path.basename(filename).replace(\"/\", \"_\", 100).rstrip('.org')\n w_title = w_title.replace(\" \", \"_\", 100)\n meta['w_title'] = w_title\n\n if 'test' in filename:\n meta['write'] = True\n\n split = filename.split(\"/\") \n if len(split) > 2:\n cate = split[1]\n cate = 
self._lookup_cate_table(cate)\n meta['category'] = cate\n\n self._manually_write_meta(filename, meta)\n return meta", "def getfile(self, name):\n try:\n datname = self.getname(name, \"dat\")\n tadname = self.getname(name, \"tad\")\n if datname and tadname:\n return Datafile(name, open(datname, \"rb\"), open(tadname, \"rb\"), self.kod)\n except IOError:\n return", "def get(name):\n\n filename = find(name)\n if filename == None:\n return name\n return open(filename).read()", "def _get_python_info_rename(path: str) -> str:\n if path.name.endswith(\".egg-info\"):\n f = \"PKG-INFO\"\n else:\n # Assume dist-info. Are there other options?\n f = \"METADATA\"\n pkgmetainfodata = path / f\n with pkgmetainfodata.open() as f:\n for line in f:\n match = re.match(r'^Name: ([A-Z-a-z].+)', line)\n if match:\n name = match.group(1)\n break\n if not line.strip():\n # First blank line; gone too far; give up\n return\n else:\n return\n return name + path.suffix", "def parse_file(self, file_name, **kwargs):\n with io.open(file_name, 'r', encoding='utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)", "def extract_file_name(self, input_file):\n self.file_name_with_ext, self.file_name = extract_file_name(input_file)", "def parse_filename(cls, filename):\n words = filename.split('_')\n return words[0], int(words[1][1:]), int(words[2])", "def ParsePkgInfoFile(filename, valid_keys=None, required_keys=None):\n with open(filename) as f:\n return ParsePkgInfo(f.read(), filename, valid_keys, required_keys)", "def info_file(self):\n return self._info_file", "def source_info(self,fname):\n\t\t# Has this source file already been parsed?\n\t\tif fname in self.src_info:\n\t\t\t# if yes return the previous parse-result\n\t\t\treturn self.src_info[fname]\n\t\t\n\t\t# Create a source file parse-info-container and ast-parse the sourcefile\n\t\tself.src_info[fname] = {}\n\t\tsrc_fp = open(fname,'rb')\n\t\tsrc = src_fp.read()\n\t\tsrc_fp.close()\n\t\tsrc_encoding = detect(src)\n\t\ta = ast.parse(src)\n\t\tdel src\n\t\tself.src_encoding[fname] = src_encoding['encoding']\n\t\t\n\t\t# Analyse the ast\n\t\tfor obj in a.body:\n\t\t\tif type(obj)==ast.ClassDef:\n\t\t\t\tc = obj\n\t\t\t\tfirstlineno = c.lineno\n\t\t\t\tlastlineno = c.lineno\n\t\t\t\tclass_doc_lines = []\n\t\t\t\tfirst_class_obj = True\n\t\t\t\tfor obj in c.body:\n\t\t\t\t\t# Detect documentation for class\n\t\t\t\t\tif first_class_obj and type(obj)==ast.Expr and type(obj.value)==ast.Str:\n\t\t\t\t\t\tfor doc_line in obj.value.s.strip().replace('\\r\\n','\\n').split('\\n'):\n\t\t\t\t\t\t\tclass_doc_lines += [doc_line.strip()]\n\t\t\t\t\t# Detect class methods\n\t\t\t\t\tif type(obj)==ast.FunctionDef:\n\t\t\t\t\t\tlastlineno = obj.lineno\n\t\t\t\t\tfirst_class_obj = False\n\t\t\t\tself.src_info[fname][c.name] = (firstlineno,lastlineno,class_doc_lines)\n\t\t\n\t\t# return the parse-info-container\n\t\treturn self.src_info[fname]", "def parse_file(filename):\n filepath = root + filename\n try:\n if filename.endswith('.txt'):\n with open(filepath) as file:\n return file.read().splitlines()\n elif filename.endswith('.csv'):\n reader = csv.reader(open(filepath))\n result = {}\n for row in reader:\n key = row[0]\n result[key] = row[1]\n return result\n except FileNotFoundError:\n with open(filepath,\"w+\") as file:\n append_string_to_textfile(filename, \"Placeholder\")\n print(\"here\")\n return (parse_file(filename))", "def splitFilename(filename):\n\n if filename[-4:] == '.rpm':\n filename = filename[:-4]\n \n archIndex = 
filename.rfind('.')\n arch = filename[archIndex+1:]\n\n relIndex = filename[:archIndex].rfind('-')\n rel = filename[relIndex+1:archIndex]\n\n verIndex = filename[:relIndex].rfind('-')\n ver = filename[verIndex+1:relIndex]\n\n epochIndex = filename.find(':')\n if epochIndex == -1:\n epoch = ''\n else:\n epoch = filename[:epochIndex]\n \n name = filename[epochIndex + 1:verIndex]\n return name, ver, rel, epoch, arch", "def read_parse_raw_data(path):\n file_list = TopologyHelper.get_file_list(path)\n print(\"Reading \" + str(len(file_list)) + \" files from \" + path)\n topology_info = []\n file_name = []\n for file in file_list:\n try:\n r = TopologyHelper.parse_file(file)\n tmp = (r[0])['Topology']\n topology_info.append(tmp)\n t = r[1]\n file_name.append(t)\n except:\n continue\n print(\"Parsing completed\")\n return file_name, topology_info", "def parse_file(self, file_name, **kwargs):\n with codecs.open(file_name, 'r', 'utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)", "def info_resource():\n fn = request.args.get(\"filename\")\n info = get_file_info(fn)\n if info:\n return jsonify(info)\n else:\n return Response(status=404)", "def parse_filename(f):\n problem = f[2:5]\n extension = f.split('.')[-1]\n if extension not in langs.keys():\n # if the extension isn't in our list we don't care about the file\n return (None, None)\n return (problem, extension)", "def info(self, *path):\n target = self.localpath(*path)\n return _open_file_info(target + '.info')", "def get(name, filename):\n\tlogging.info(\"Reading {} from {}\".format(name, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\tlogging.debug(\"Reading name/snippet from file\")\n\t\tin_file = False\n\t\tfor row in reader:\n\t\t\tif str(row[0]) == name:\n\t\t\t\tin_file = True\n\t\t\t\tprint row\n\t\tif in_file == False:\n\t\t\tprint \"That's not in this file\"\n\tlogging.debug(\"Read successful\")\n\treturn name, filename", "def _get_file_path(self, url):\n try:\n row = ET.fromstring(self._session.get(url, headers={\"Access-Token\":self._token}).text)[1][2][1]\n data = [row[1].text, row[1].text, row[2].text]\n if \" - S\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" - S\")]\n elif \" (\" in data[0]:\n data[0] = data[0][0:data[1].rfind(\" (\")]\n return data\n except Exception as e:\n exception_type = type(e).__name__\n print(\"Unable to get media name.\")\n print(exception_type)\n print(e)\n return None", "def get_name(fname):\n if fname.endswith('.nii.gz'):\n fname = fname.replace('.nii.gz', '')\n\n name_stuff = {}\n tmp = fname.split('_') # tmp is just a placeholder\n elems = tmp[-4:-1] # The elements of the file name in a list\n name_stuff['IC'] = elems[0][2:] # 18\n name_stuff['Scan'] = elems[1][1:] # 3\n name_stuff['Hemi'] = elems[2].upper()\n\n return name_stuff", "def get_info(raw_filename, epochs_filename):\n trans, fiducials, info = get_head_correct_info(\n raw_filename, epochs_filename)\n return info", "def get_file_infos(path_spec):\n\n file_entry = dfvfs_utils.pathspec_to_fileentry(path_spec)\n stat = file_entry.GetStat()\n if not stat:\n LOGGER.warning(\"Could not get stat object for %s\", file_entry.name)\n\n entry = {\n \"size\": getattr(stat, 'size', 0),\n \"name\": file_entry.name,\n \"type\": file_entry.entry_type,\n }\n for time in [('atime', 'accessed'), ('mtime', 'modified'), ('crtime', 'created')]:\n secs = getattr(stat, time[0], 0)\n nanos = getattr(stat, time[0] + '_nano', 0)\n if secs and secs != 
0:\n datetime_entry = datetime.utcfromtimestamp(secs)\n datetime_entry = datetime_entry.replace(microsecond=int(nanos / 10))\n entry[time[1]] = datetime_entry.isoformat(timespec='milliseconds') + 'Z'\n\n # the path is not part of STIX 2.0 for file objects, but is very useful to have,\n # so we make it a custom attribute\n entry[\"path\"] = path_spec.location\n\n return entry", "def edf_info(file_name, header_size=None, verbose=False):\n try:\n f = open(file_name, 'r', encoding='latin-1')\n except TypeError:\n f = open(file_name, 'r') # fall back\n if header_size is None:\n # guess the header size by peeking at the first chunk of 512 bytes\n header_values = unpack_header(f.read(512))\n total_file_size = os.path.getsize(file_name)\n payload_size = int(header_values['Size'].split('.')[0])\n header_size = total_file_size - payload_size\n if verbose:\n print('determined header size is %d bytes' % header_size)\n f.seek(0)\n header = f.read(header_size)\n f.close()\n return unpack_header(header)", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def get_file_info(fpath, raw=False):\n statbuf = os.stat(fpath)\n\n try:\n # Sometimes this fails\n if sys.platform.startswith('win32'):\n import win32security\n sec_desc = win32security.GetFileSecurity(\n fpath, win32security.OWNER_SECURITY_INFORMATION)\n owner_sid = sec_desc.GetSecurityDescriptorOwner()\n owner = win32security.LookupAccountSid(None, owner_sid)[0]\n else:\n from pwd import getpwuid\n owner = getpwuid(statbuf.st_uid).pw_name\n except Exception:\n owner = None\n\n info = OrderedDict([\n ('created', statbuf.st_ctime),\n ('filesize', statbuf.st_size),\n ('last_accessed', statbuf.st_atime),\n ('last_modified', statbuf.st_mtime),\n ('owner', owner)\n ])\n # permission = [os.access(fpath, os.W_OK), os.access(fpath, os.X_OK)]\n\n if not raw:\n time_keys = [\n 'last_accessed',\n 'last_modified',\n 'created',\n ]\n for key in time_keys:\n info[key] = datetime.datetime.fromtimestamp(info[key])\n return info", "def get_resolution(path_name, file_name):\n with open(path_name + 'resolution.txt') as file, \\\n mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as s:\n file.seek(0, s.find(file_name.encode()))\n line = file.readline().split(' ')\n return [line[1], line[2].rstrip('\\n')]", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def decompose_newstyle_name(filename):\n path, parts, ext = _get_fields(filename)\n observatory = parts[0]\n serial = list_get(parts, 3, \"\")\n\n if ext == \".pmap\":\n assert len(parts) in [1,2], \"Invalid .pmap filename \" + repr(filename)\n instrument, filekind = \"\", \"\"\n serial = list_get(parts, 1, \"\")\n elif ext == \".imap\":\n assert len(parts) in [2,3], \"Invalid .imap filename \" + repr(filename)\n instrument = parts[1]\n filekind = \"\"\n serial = list_get(parts, 2, \"\")\n else:\n assert len(parts) in [3,4], \"Invalid filename \" + repr(filename)\n instrument = parts[1]\n filekind = parts[2]\n serial = list_get(parts, 3, \"\")\n\n # Don't include filename in these or it messes up crds.certify unique error tracking.\n\n assert instrument in INSTRUMENTS+[\"\"], \"Invalid instrument \" + repr(instrument)\n assert filekind in FILEKINDS+[\"\"], \"Invalid filekind \" + repr(filekind)\n assert re.match(r\"\\d*\", serial), \"Invalid id field \" + repr(id)\n # extension may vary for upload temporary files.\n\n 
return path, observatory, instrument, filekind, serial, ext", "def ParseFile(self, handle, name):\n return cPickle.load(handle)", "def metadata_name(filename):\n\tif test_hachoir_extension(filename):\n\t\tmetadata = metadata_for_file(filename)\n\t\tif metadata:\n\t\t\tdata = dict([\n\t\t\t\t(data.key, data.values[0].value)\n\t\t\t\tfor data in metadata\n\t\t\t\tif data.values\n\t\t\t\t])\n\t\telse:\n\t\t\tdata=None\n\telif test_3D_extension(filename):# 3D not in the extention \n\t\tdata = {'mime_type':'model'}\n\telse:\n\t\tdata=None\n\treturn data", "def identify_file(self, file):", "def read_file(file_name):\r\n\r\n if file_name.find('.md') == -1:\r\n file_name += '.md'\r\n\r\n with open(file_name, 'r', encoding='utf-8') as file:\r\n file_data = file.read()\r\n\r\n return file_data", "def process(path, name):\n d = {}\n path = path / name\n with open(path.as_posix()) as fd:\n file_contents = fd.read()\n module = ast.parse(file_contents)\n docstring = ast.get_docstring(module)\n docstring_line = get_value(docstring)\n d['name'] = name\n if docstring_line:\n d['docstring'] = docstring_line\n else:\n d['docstring'] = 'No docstring provided.'\n return d", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\TvInfo\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def getSeriesInfo(fileName):\r\n\r\n # get only the name\r\n seriesName = fileName.replace(\".ser\", \"\")\r\n \r\n # find out how many sections there are\r\n sectionNums = []\r\n \r\n # search each file in the folder that ends with a number\r\n for file in os.listdir():\r\n try:\r\n sectionNums.append(int(file[file.rfind(\".\")+1:]))\r\n except:\r\n pass\r\n\r\n # sort the section numbers so they are in order\r\n sectionNums.sort()\r\n\r\n return seriesName, sectionNums", "def _read_stats(self, name):\n if os.name == 'nt':\n name = asunicode(name)\n stats = os.stat(name)\n mode = oct(stats.st_mode)[-4:]\n size = stats.st_size\n atime = int(stats.st_atime)\n mtime = int(stats.st_mtime)\n return (mode, size, mtime, atime)", "def _pname_and_metadata(in_file):\n\n\n if in_file.endswith(\".csv\"):\n raise ValueError(\"Did not find input metadata file: %s\" % in_file)\n base, md, global_vars = in_file, {}, {}\n md_file = None\n return base, md, global_vars, md_file", "def get_metadata(self, filename):\n return self.execute_json(filename)[0]", "def get_video_info(self, file_name):\n info, colnames = self._get_video_info(file_name)\n if info is None or len(info)==0:\n return None\n return dict(zip(colnames, info[0]))", "def find_job_info(self, file_name):\n for job_info in self.processResults:\n if file_name in job_info['file']:\n return job_info\n raise Exception('Job info not found')", "def parse_tarinfo(\n tarinfo: tarfile.TarInfo, tar_file: tarfile.TarFile\n) -> Tuple[Optional[bytes], Path]:\n path = Path(tarinfo.path)\n if path.suffix == \".nodata\" or path.suffix == \".nometa\":\n return None, path\n data = tar_file.extractfile(tarinfo).read()\n return data, path", "def getfilemeta(path):\n if os.path.isfile(path):\n meta = os.stat(path)\n return (meta)\n else:\n raise Exception('File not exist')", "def extract_file_tags_from_file_name(filePath): #TODO untested and unused\n out_dict = {}\n studyid = 'n/a'\n subjectid = 'n/a'\n visitid = '1'\n\n if 
'scorefiles' in filePath:\n studyid = filePath.split('scorefiles')[0]\n studyid = studyid.split('\\\\')\n if studyid[-1] == '':\n studyid = studyid[-2]\n else:\n studyid = studyid[-1]\n subjectid = filePath.split('scorefiles')[-1]\n subjectid = subjectid.split('subjectid')[-1]\n subjectid = subjectid.split('.')[0]\n if 'visit' in filePath:\n visitid = subjectid.split('visitid')[-1]\n visitid = visitid.split('.')[0]\n subjectid = subjectid.split('visitid')[0]\n\n subjectid = str(subjectid).lstrip(STRIP).rstrip(STRIP)\n subjectid = str(subjectid).lstrip('_').rstrip('_')\n visitid = str(visitid).lstrip(STRIP).rstrip(STRIP)\n visitid = str(visitid).lstrip('_').rstrip('_')\n studyid = str(studyid).lstrip(STRIP).rstrip(STRIP)\n out_dict['subjectid'] = subjectid\n out_dict['studyid'] = studyid\n out_dict['visitid'] = visitid\n return out_dict", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. 
This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def extract_line_information(line_information):\n file_and_line = line_information.split(\":\")\n # This is a dirty windows specific hack to deal with drive letters in the\n # start of the file-path, i.e D:\\\n if len(file_and_line[0]) == 1:\n # If the first component is just one letter, we did an accidental split\n file_and_line[1] = file_and_line[0] + \":\" + file_and_line[1]\n # Join the first component back up with the second and discard it.\n file_and_line = file_and_line[1:]\n\n if len(file_and_line) != 2 and len(file_and_line) != 3:\n return None\n # The case where we have no line number, in this case we return the line\n # number as 1 to mark the whole file.\n if len(file_and_line) == 2:\n line_num = 1\n if len(file_and_line) == 3:\n try:\n line_num = int(file_and_line[1])\n except ValueError:\n return None\n\n file_name = os.path.relpath(file_and_line[0])\n return file_name, line_num", "def getFileInfoFromXML(thisfile):\n\n pfn = thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\")\n lfn = thisfile.getElementsByTagName(\"lfn\")[0].getAttribute(\"name\")\n guid = thisfile.getAttribute(\"ID\")\n\n return lfn, pfn, guid", "def read_file(file_name):\n return open(os.path.join(os.path.dirname(os.path.dirname(__file__)), file_name)).read()", "def _get_fname(self, input_fname):\n # Check whether input_fname exists.\n if not os.path.isfile(input_fname):\n # Check to see whether the uncompressed version is available instead\n if not os.path.isfile(input_fname[:-3]):\n msg = \"Input filename %s is not a file\"\n raise IOError(msg % input_fname)\n else:\n msg = (\n \"Input filename ``%s`` is not a file. 
\\n\"\n \"However, ``%s`` exists, so change your input_fname accordingly.\"\n )\n raise IOError(msg % (input_fname, input_fname[:-3]))\n\n return _passively_decode_string(os.path.abspath(input_fname))", "def file_by_file_name(self, file_name):\n ls = self.files.filter_by(file_name=file_name)\n return ls[0] if 0 < len(ls) else None", "def extract_file(path):", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def extract_metadata(name):\n seps = name.count(\" - \")\n artist = title = None\n\n if seps == 1:\n\n pos = name.find(\" - \")\n artist = name[:pos].strip()\n title = name[pos + 3:].strip()\n\n else:\n title = name.strip()\n\n return dict(artist=artist, title=title)", "def parseFileLine(self, line):\n c = line.strip().split(\":\")\n return (c[0], c[1], c[2], c[3])", "def read_photo_date(file_name):\n # Open image file for reading (binary mode)\n fd = open(file_name, 'rb')\n\n # Return Exif tags\n tags = exifread.process_file(fd)\n try:\n date_time = tags['EXIF DateTimeOriginal']\n except KeyError:\n date_time = get_timestamp_from_mp4(os.path.basename(file_name))\n if date_time == \"\":\n # date time info is not valid in exif, try to get file's create time\n date_time = get_file_modification_time(file_name)\n \n\n log(str(date_time) + \"--->\" + str(file_name))\n\n #parse date time string and returns tuple\n words = str(date_time).split(' ')[0].split(':') #2013:11:16 17:44:16\n if len(words) == 3:\n y = words[0]\n m = words[1]\n d = words[2]\n else:\n words = str(date_time).split(' ')[0].split('-') # 2015-01-08 16:05:13\n y = words[0]\n m = words[1]\n d = words[2]\n\n #returns a tuple\n return y, m, d", "def _parse_file(cls, filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)", "def extractRunInfo(filename):\n tokens = filename.split('_')\n loading = tokens[1].strip('LiF')\n polymer = tokens[2].strip('.m')\n return (float(loading)/100, polymer)", "def extract_date_metadata(fname):\n\n try:\n # check if file has creation date, exception if not\n date_metadata = fileops.get_video_creation_date_metadata(fname)\n\n # extract the date/time string from metadata, exception if\n # not the proper format\n datetimestr = metadata_to_datetimestr(date_metadata)\n\n logging.debug(\"Found creation date metadata %r for file %r\",\n datetimestr, os.path.basename(fname))\n\n return datetimestr\n\n except fileops.VideoMetadataError:\n logging.warning(\n \"%r does not have a proper creation date metadata\",\n os.path.basename(fname))\n\n return \"\"\n\n except DateStrError:\n logging.warning(\n \"%r creation data metadata not the right format\",\n os.path.basename(fname))\n \n return \"\"" ]
[ "0.70860726", "0.7010818", "0.6978658", "0.69049", "0.68632764", "0.66971594", "0.6577073", "0.64977664", "0.64931035", "0.64348495", "0.6431073", "0.6293928", "0.6288821", "0.6240199", "0.623606", "0.61522275", "0.61236763", "0.6095241", "0.607708", "0.60727507", "0.6071609", "0.60508907", "0.6012439", "0.6008246", "0.6001871", "0.5996577", "0.5996046", "0.5976389", "0.5971759", "0.5956237", "0.59540296", "0.5953855", "0.59486103", "0.59392416", "0.5931048", "0.5929552", "0.59120685", "0.5895985", "0.5862036", "0.5842078", "0.5813745", "0.5791113", "0.5778634", "0.57774085", "0.5772407", "0.5763297", "0.574481", "0.5744074", "0.57389545", "0.5733274", "0.5732254", "0.57124656", "0.57110274", "0.5708053", "0.5698728", "0.56970876", "0.56880844", "0.56879514", "0.56842554", "0.56788296", "0.56606656", "0.56597143", "0.5656076", "0.56535745", "0.56529963", "0.5647726", "0.56452334", "0.56307393", "0.56204605", "0.56194013", "0.5616566", "0.5613595", "0.5598651", "0.55939513", "0.55924505", "0.5586028", "0.5577696", "0.55723655", "0.5569776", "0.55676234", "0.5558995", "0.5541225", "0.5540112", "0.55400085", "0.5538741", "0.553717", "0.5532061", "0.5530817", "0.5530241", "0.5527616", "0.5526215", "0.55259633", "0.55221283", "0.5516252", "0.55134815", "0.550896", "0.5502656", "0.5493463", "0.5490918", "0.5489737" ]
0.71029544
0
Bug 1660259 Correct nav arialabel and label and description for theme buttons, part {index}.
def migrate(ctx): ctx.add_transforms( "browser/browser/newtab/onboarding.ftl", "browser/browser/newtab/onboarding.ftl", transforms_from(""" onboarding-multistage-theme-tooltip-automatic-2 = .title = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-automatic.title") } onboarding-multistage-theme-description-automatic-2 = .aria-description = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-automatic.title") } onboarding-multistage-theme-tooltip-light-2 = .title = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-light.title") } onboarding-multistage-theme-description-light = .aria-description = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-light.title") } onboarding-multistage-theme-tooltip-dark-2 = .title = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-dark.title") } onboarding-multistage-theme-description-dark = .aria-description = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-dark.title") } onboarding-multistage-theme-tooltip-alpenglow-2 = .title = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-alpenglow.title") } onboarding-multistage-theme-description-alpenglow = .aria-description = { COPY_PATTERN(from_path, "onboarding-multistage-theme-tooltip-alpenglow.title") } """, from_path="browser/browser/newtab/onboarding.ftl"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_button_label(self,index):\r\n self.l_debug(\"_get_button_label\",\"index=%d\" % (index))\r\n # TODO: Make sure it's a valid index?\r\n return self.parent.harmony_config['info']['functions'][index]['label']", "def change_back_alphabet_button(event):\n img_alphabet_button_mouse_return = image_abc[abc_index]\n a_label = buttons_abc[abc_index]\n a_label.config(image=img_alphabet_button_mouse_return)\n a_label.image = img_alphabet_button_mouse_return\n a_label.grid(row=ALPHA_ROW, column=ALPHA_COL, pady=2)", "def editNavBar(src, des, tipe, Xrc):\n old_title = src.split(\"/\")[-1].replace(\".html\", \"\")\n new_title = des.split(\"/\")[-1].replace(\".html\", \"\")\n index = \"Xblog/docs/index.html\"\n with open(index, 'r') as f:\n soup = BeautifulSoup(f, \"html.parser\")\n f.close()\n tag = soup.select(\"#\"+old_title)[0]\n old_src = tag.a[\"onclick\"].split(\"; \")[1].split('(')[1].split(')')[0]\n if tipe == 'Xbook':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + new_title + '/index.html\\''\n if tipe == 'Xpage':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + new_title + '.html\\''\n tag.a.string = tag.a.string.replace(old_title, new_title)\n tag.a[\"onclick\"] = tag.a[\"onclick\"].replace(old_src, new_src)\n tag.a[\"id\"] = new_title\n with open(index, 'w') as f:\n f.write(soup.prettify(formatter=\"html\"))\n f.close()\n ccc.success(\"updating \" + des + \" from navigation pallete\")", "def change_alphabet_buttons(event):\n another_image = image_abc_active[abc_index]\n a_label = buttons_abc[abc_index]\n a_label.config(image=another_image)\n a_label.image = another_image\n a_label.grid(row=ALPHA_ROW, column=ALPHA_COL, pady=2)", "def _leadingIcons(self):", "def generate_buttons(self):\n raise Exception('Implement me!')", "def updateTabBtns(self):\n index = self.currentIndex()\n try:\n children = self.widget(index).children()\n except:\n logger.debug(\"UpdateTabBtns: tab has no buttons\")\n for c in children:\n if (isinstance(c,drag.DragButton)):\n c.updateNumSel() #update outline of button", "def create_frame_icons(self):\n self.text = \"{}\".format(self.name)\n self.y = self.startY - 10 if self.startY - 10 > 10 else self.startY + 10\n self.colorIndex = LABELS.index(self.name)", "def retranslate(self):\n\t\t#NOTE: mnemonics are not recognized for this checkbox. 
no idea why\n\t\tself.button.set_label('')\n\t\tself.button.child.set_text_with_mnemonic(_('_...'))\n\t\tself.button.set_tooltip_text(_('Select a directory'))\n\t\tself.edit.set_tooltip_text(_('Directory location'))", "def load_main_gui(self, gui_handle):\n elementsToModify = [\"butZoomIn\", \"butZoomOut\", \"butGoToLoc\",\n \"butChrUp\", \"butChrDown\"]\n\n for element in elementsToModify:\n xrc.XRCCTRL(gui_handle, element).SetLabel(self.__data[element])\n\n return(True)\n\n # the menus are a little different:\n elementsToModify = [\"file\", \"about\", \"help\"]\n\n menu_head = gui_handle.GetMenuBar()\n\n for element in elementsToModify:\n item = menu_head.FindItem(element)\n print item\n if item:\n menu_item = menu_head.GetMenu(item)\n menu_item.SetLabel(element)", "def on_pushButton_4_clicked(self):\n # TODO: not implemented yet\n self.label.setText('[1/200]页')", "def addToolBarButtons(self):", "def _helpmenu_about():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"About\")\n self.helpindex.geometry(\"500x300\")\n self.helpindex.label()", "def update_navigation_buttons(self):\n test = self.artists_list.currentIndex() # .row()\n self.prev_artist_button.setEnabled(True)\n self.next_artist_button.setEnabled(True)\n if test == 0:\n self.prev_artist_button.setEnabled(False)\n if test == len(self.c_artists) - 1:\n self.next_artist_button.setEnabled(False)\n test = self.albums_list.currentIndex() # .row()\n self.prev_album_button.setEnabled(True)\n self.next_album_button.setEnabled(True)\n if test == 0:\n self.prev_album_button.setEnabled(False)\n if test == len(self.c_albums) - 1:\n self.next_album_button.setEnabled(False)", "def show_main_buttons(self):\n pass", "def button_captions(self, obj):\n return {\n 'BO_SAVE_CAPTION': '',\n 'BO_SAVE_AS_NEW_CAPTION': '',\n 'BO_SAVE_AND_CONT_CAPTION': '',\n 'BO_SAVE_AND_ADD_ANOTHER_CAPTION': '',\n 'BO_DELETE_CAPTION': '',\n }", "def widget_terminal_title(\n widget: 'pygame_menu.widgets.Widget',\n widget_index: int = -1,\n current_index: int = -1\n) -> str:\n w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC\n if isinstance(widget, pygame_menu.widgets.Frame):\n w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC\n w_title += f'{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)\n if widget.horizontal:\n w_title += 'H] '\n else:\n w_title += 'V] '\n if widget.is_scrollable:\n wsz = widget.get_inner_size()\n wsm = widget.get_max_size()\n wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'\n wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'\n w_title += f'∑ [{wsh},{wsv}] '\n w_title += TerminalColors.ENDC\n else:\n if widget.get_title() != '':\n title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC\n w_title = f'{w_class_id} - {title_f} - '\n else:\n w_title = w_class_id + ' - '\n\n # Column/Row position\n w_title += TerminalColors.INDIGO\n cr = widget.get_col_row_index()\n w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'\n w_title += TerminalColors.ENDC\n\n # Add position\n w_title += TerminalColors.MAGENTA\n w_title += ' ({0},{1})'.format(*widget.get_position())\n w_title += TerminalColors.ENDC\n\n # Add size\n w_title += TerminalColors.BLUE\n w_title += ' ({0},{1})'.format(*widget.get_size())\n w_title += TerminalColors.ENDC\n\n # Add mods\n w_title += TerminalColors.CYAN\n if widget.is_floating():\n w_title += ' Φ'\n if not widget.is_visible():\n w_title += ' ╳'\n if not widget.is_selectable:\n w_title += ' β'\n if 
widget.is_selected():\n w_title += TerminalColors.BOLD + ' ⟵'\n if current_index != -1 and current_index != widget_index:\n w_title += f'! [{widget_index}->{current_index}]'\n if widget.get_menu() is None:\n w_title += ' !▲'\n w_title += TerminalColors.ENDC\n\n return w_title", "def _icons(self):", "def update_navigation_buttons(self):\n test = self.artist_list.currentIndex() # .row()\n self.prev_artist_button.setEnabled(True)\n self.next_artist_button.setEnabled(True)\n if test == 0:\n self.prev_artist_button.setEnabled(False)\n if test == len(self.c_artists) - 1:\n self.next_artist_button.setEnabled(False)\n self.focus_albums()", "def _tab_changed(self, index):\n if index == 0:\n #self._poplulate_labels(self.digital_labels)\n self._setup_table_digital()\n elif index == 1:\n #self._poplulate_labels(self.analog_labels)\n self._setup_table_analog()\n else:\n self._setup_table_counter()", "def _trailingIcons(self):", "def resetNamesButtonClicked(self):\n self.writeFile(True)\n for key, binding in self.loadCfg():\n self.writePipe('bind ' + key + ' None')\n while self.ui.tv_bindings.topLevelItemCount() > 0:\n self.ui.tv_bindings.takeTopLevelItem(0)\n itemList = []\n for i in self.loadCfg():\n item = QtGui.QTreeWidgetItem(i)\n itemList.append(item)\n item.setFlags(QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)\n self.ui.tv_bindings.addTopLevelItems(itemList)\n self.ui.but_g1.setText(\"\")\n self.ui.but_g2.setText(\"\")\n self.ui.but_g3.setText(\"\")\n self.ui.but_g4.setText(\"\")\n self.ui.but_g5.setText(\"\")\n self.ui.but_g6.setText(\"\")\n self.ui.but_g7.setText(\"\")\n self.ui.but_g8.setText(\"\")\n self.ui.but_g9.setText(\"\")\n self.ui.but_g10.setText(\"\")\n self.ui.but_g11.setText(\"\")\n self.ui.but_g12.setText(\"\")\n self.ui.but_g13.setText(\"\")\n self.ui.but_g14.setText(\"\")\n self.ui.but_g15.setText(\"\")\n self.ui.but_g16.setText(\"\")\n self.ui.but_g17.setText(\"\")\n self.ui.but_g18.setText(\"\")\n self.ui.but_g19.setText(\"\")\n self.ui.but_g20.setText(\"\")\n self.ui.but_g21.setText(\"\")\n self.ui.but_g22.setText(\"\")\n self.ui.but_m1.setText(\"\")\n self.ui.but_m2.setText(\"\")\n self.ui.but_m3.setText(\"\")\n self.ui.but_mr.setText(\"\")\n self.ui.but_l1.setText(\"\")\n self.ui.but_l2.setText(\"\")\n self.ui.but_l3.setText(\"\")\n self.ui.but_l4.setText(\"\")", "def set_arrows(self):\n # Up\n mask = str(self.n_parent) + \":\" + str(self.n - 1)\n #if self.d.get(mask):\n # self.upBtn.SetLabelText(\"^\")\n #else:\n # self.upBtn.SetLabelText(\"\")\n # if self.m_Mnemo.Value == True:\n # self.upBtn.Hide()\n # else:\n # self.upBtn.Show()\n\n # down\n mask = str(self.n_parent) + \":\" + str(self.n + 1)\n #if self.d.get(mask):\n #self.down_Btn.SetLabelText(\"v\")\n #else:\n # if not self.m_Mnemo.Value:\n # self.down_Btn.SetLabelText(\"\")\n # self.down_Btn.Enable()\n # self.down_Btn.Show()\n # else:\n # self.down_Btn.SetLabelText(\"\")\n # #self.down_Btn.Disable()\n # #self.down_Btn.Hide()\n # if self.m_Mnemo.Value == True:\n # self.down_Btn.Hide()\n # else:\n # self.down_Btn.Show()\n\n # left\n mask = str(self.n_parent)\n if not self.n_parent == 0:\n if self.d.get(mask):\n self.leftBtn.SetLabelText(\"<\")\n else:\n self.leftBtn.SetLabelText(\"\")\n else:\n self.leftBtn.SetLabelText(\"\")\n\n # right\n mask = str(self.n_parent) + \":\" + str(self.n) + \":\" + str(1)\n if self.d.get(mask):\n self.rightBtn.SetLabelText(\">\")\n else:\n if not self.m_Mnemo.Value:\n self.rightBtn.SetLabelText(\"\")\n self.rightBtn.Enable()\n self.rightBtn.Show()\n 
else:\n self.rightBtn.SetLabelText(\"\")", "def test_get_custom_buttons_list(self):\n pass", "def test_get_custom_button(self):\n pass", "def toggleNamesButtonClicked(self):\n nameTable = {}\n if self.keyNames:\n for i in self.loadCfg():\n nameTable[i[0]] = i[1][4:]\n self.keyNames = False\n else:\n for i in self.loadCfg():\n nameTable[i[0]] = i[0]\n self.keyNames = True\n self.ui.but_g1.setText(nameTable[\"G1\"])\n self.ui.but_g2.setText(nameTable[\"G2\"])\n self.ui.but_g3.setText(nameTable[\"G3\"])\n self.ui.but_g4.setText(nameTable[\"G4\"])\n self.ui.but_g5.setText(nameTable[\"G5\"])\n self.ui.but_g6.setText(nameTable[\"G6\"])\n self.ui.but_g7.setText(nameTable[\"G7\"])\n self.ui.but_g8.setText(nameTable[\"G8\"])\n self.ui.but_g9.setText(nameTable[\"G9\"])\n self.ui.but_g10.setText(nameTable[\"G10\"])\n self.ui.but_g11.setText(nameTable[\"G11\"])\n self.ui.but_g12.setText(nameTable[\"G12\"])\n self.ui.but_g13.setText(nameTable[\"G13\"])\n self.ui.but_g14.setText(nameTable[\"G14\"])\n self.ui.but_g15.setText(nameTable[\"G15\"])\n self.ui.but_g16.setText(nameTable[\"G16\"])\n self.ui.but_g17.setText(nameTable[\"G17\"])\n self.ui.but_g18.setText(nameTable[\"G18\"])\n self.ui.but_g19.setText(nameTable[\"G19\"])\n self.ui.but_g20.setText(nameTable[\"G20\"])\n self.ui.but_g21.setText(nameTable[\"G21\"])\n self.ui.but_g22.setText(nameTable[\"G22\"])\n self.ui.but_m1.setText(nameTable[\"M1\"])\n self.ui.but_m2.setText(nameTable[\"M2\"])\n self.ui.but_m3.setText(nameTable[\"M3\"])\n self.ui.but_mr.setText(nameTable[\"MR\"])\n self.ui.but_l1.setText(nameTable[\"L1\"])\n self.ui.but_l2.setText(nameTable[\"L2\"])\n self.ui.but_l3.setText(nameTable[\"L3\"])\n self.ui.but_l4.setText(nameTable[\"L4\"])\n self.ui.but_down.setText(nameTable[\"DOWN\"])\n self.ui.but_stickTop.setText(nameTable[\"TOP\"])\n nameTable[\"LEFT\"] = createVerticalText(nameTable[\"LEFT\"])\n self.ui.but_left.setText(nameTable[\"LEFT\"])\n if nameTable[\"STICK_UP\"] == \"STICK_UP\":\n nameTable[\"STICK_UP\"] = \"UP\"\n self.ui.but_stickUp.setText(nameTable[\"STICK_UP\"])\n if nameTable[\"STICK_DOWN\"] == \"STICK_DOWN\":\n nameTable[\"STICK_DOWN\"] = \"DOWN\"\n self.ui.but_stickDown.setText(nameTable[\"STICK_DOWN\"])\n if nameTable[\"STICK_LEFT\"] == \"STICK_LEFT\":\n nameTable[\"STICK_LEFT\"] = \"L\"\n else:\n nameTable[\"STICK_LEFT\"] = createVerticalText(nameTable[\"STICK_LEFT\"])\n self.ui.but_stickLeft.setText(nameTable[\"STICK_LEFT\"])\n if nameTable[\"STICK_RIGHT\"] == \"STICK_RIGHT\":\n nameTable[\"STICK_RIGHT\"] = \"R\"\n else:\n nameTable[\"STICK_RIGHT\"] = createVerticalText(nameTable[\"STICK_RIGHT\"])\n self.ui.but_stickRight.setText(nameTable[\"STICK_RIGHT\"])", "def create_buttons(self):\r\n return []", "def create_menus( self ):", "def test_issue_edit_label(self):\n pass", "def update_frame_label(self):\n count = len(self.main_frame_list)\n\n for idx in range(count): #Start, count) \n s1 = \"\"\n for i in range(16): #self.main_frame_nibble_list: # 16\n s = \"\"\n for j in range(4):\n s += str(self.main_button_bit_list[idx][i*4 + j].get_label())\n s = s[::-1]\n self.main_frame_nibble_list[idx][i].set_label(str(hex(int(s,2)))[2:].upper())\n s1 += str(self.main_frame_nibble_list[idx][i].get_label())\n s1 = s1[::-1]\n if DEBUG: print(s1[:8] + \" \" + s1[8:])\n self.main_frame_list[idx].set_label(s1[:8] + \" \" + s1[8:])", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to 
the main menu\n ------------------------------------------------\"\"\")", "def habHelp(self):\n rf = os.path.join('docs','helpButtons','prefsHabitat.html')\n self.showHelpFile( rf )", "def on_pushButton_5_clicked(self):\n # TODO: not implemented yet\n self.label.setText('[200/200]页')", "def get_title_menu(self):\n return _(self.view_label).capitalize()", "def buttons_dict(phrase):\n switcher = {\n '처음으로': ['병원 정보', '병원 위치', '병원 운영시간', '병원 프로모션'],\n '병원 정보': ['의료진', '병원 사진', '병원 진료과목', '병원 전화하기'],\n '병원 프로모션': ['프로모션 A', '프로모션 B', '프로모션 C'],\n '의료진': ['홍길동 피부과 전문의', '김제인 마취과 전문의', '김존 피부과 전문의'],\n '병원 사진': ['내부', '건물', '진료실']\n }\n default_buttons = []\n return switcher.get(phrase, default_buttons) + ['처음으로']", "def radioList(dev, draw, index):\n\n global menuindex\n global listMenuStart, listMenuEnd\n global names\n \n font = ImageFont.load_default()\n draw.rectangle(dev.bounding_box, outline=\"white\", fill=\"black\")\n if index > listMenuEnd:\n listMenuEnd += 1\n listMenuStart += 1\n elif index < listMenuStart:\n listMenuEnd -= 1\n listMenuStart -= 1\n for i in range(6):\n if( i == (index-listMenuStart)):\n menuindex = index\n invert(draw, 4, 4 + (index-listMenuStart)*10, names[listMenuStart + i][0], False)\n else:\n draw.text((4, 4 + i*10), names[listMenuStart + i][0], font = font, fill = 255)", "def retranslateUi(self):\n self.window.setWindowTitle(\"Othello\")\n self.player_1.setText(self.game._player1.read_name())\n self.score_1.setText(str(0))\n self.player_2.setText(self.game._player2.read_name())\n self.score_2.setText(str(0))\n self.menuOthello.setTitle(\"Game\")\n self.menuOptions.setTitle(\"Options\")\n self.action_new_game.setText(\"New Game\")\n self.action_undo.setText(\"Undo\")\n self.action_stats.setText(\"Stats\")", "def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]) -> None:\n # XXX: modifies tree inline\n # Logic modeled from themes/basic/genindex.html\n for _key, columns in tree:\n for _entryname, (links, subitems, _key) in columns:\n for (i, (ismain, link)) in enumerate(links):\n m = self.refuri_re.match(link)\n if m:\n links[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))\n for _subentryname, subentrylinks in subitems:\n for (i, (ismain, link)) in enumerate(subentrylinks):\n m = self.refuri_re.match(link)\n if m:\n subentrylinks[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))", "def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ROC - Robot Operational Controller\"))\n self.qrcode_App_Button.setToolTip(_translate(\"MainWindow\", \"Opens the QR Code Reader\"))\n self.qrcode_App_Button.setText(_translate(\"MainWindow\", \"QR App\"))\n self.data_Button.setToolTip(_translate(\"MainWindow\", \"Opens the Data Dashboard\"))\n self.data_Button.setText(_translate(\"MainWindow\", \"Data\"))\n self.config_Button.setToolTip(_translate(\"MainWindow\", \"Configure the ROC\"))\n self.config_Button.setText(_translate(\"MainWindow\", \"Options\"))\n self.docs_Button.setToolTip(_translate(\"MainWindow\", \"Go to the Documentation Website\"))\n self.docs_Button.setText(_translate(\"MainWindow\", \"Docs\"))\n self.about_Button.setToolTip(_translate(\"MainWindow\", \"Contact and About\"))\n self.about_Button.setText(_translate(\"MainWindow\", \"About\"))\n self.robot_Viewer_Label.setText(_translate(\"MainWindow\", \"ROBOT VIEWER\"))\n self.terminalWidget.setTabText(self.terminalWidget.indexOf(self.urxvtWidget), _translate(\"MainWindow\", \"urxvt\"))\n 
self.robot_TB1_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 1 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB1_Status.setText(_translate(\"MainWindow\", \"TB1 STATUS\"))\n self.robot_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 1 Viewer\"))\n self.robot_TB1_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 1\"))\n self.robot_TB2_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 2 Viewer\"))\n self.robot_TB2_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 2\"))\n self.robot_TB3_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 3 Viewer\"))\n self.robot_TB3_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 3\"))\n self.robot_TB4_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 4 Viewer\"))\n self.robot_TB4_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 4\"))\n self.robot_TB2_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 2 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB2_Status.setText(_translate(\"MainWindow\", \"TB2 STATUS\"))\n self.robot_TB3_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 3 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB3_Status.setText(_translate(\"MainWindow\", \"TB3 STATUS\"))\n self.robot_TB4_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 4 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB4_Status.setText(_translate(\"MainWindow\", \"TB4 STATUS\"))\n self.robot_TB1.setTitle(_translate(\"MainWindow\", \" Robot 1 (TB1)\"))\n self.configure_TB1_Button.setToolTip(_translate(\"MainWindow\", \"Opens the Settings for Robot 1 (TB1)\"))\n self.configure_TB1_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB1_Button.setToolTip(_translate(\"MainWindow\", \"Open the Logs for Robot 1 (TB1)\"))\n self.logs_TB1_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Floor Map\"))\n self.floor_TB1_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Kinect Camera\"))\n self.kinect_TB1_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the GMAPP\"))\n self.gmapp_TB1_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Notebook Camera\"))\n self.camera_TB1_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Turn Viewer On\"))\n self.on_TB1_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Turn Viewer Off\"))\n self.off_TB1_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB1.setToolTip(_translate(\"MainWindow\", \"Reload the Viewer\"))\n self.reload_TB1.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB1.setToolTip(_translate(\"MainWindow\", \"Reset the Viewer\"))\n self.reset_TB1.setText(_translate(\"MainWindow\", \"RESET\"))\n self.valuesTB1Frame.setToolTip(_translate(\"MainWindow\", \"Shows Robot 1 (TB1) Data\"))\n self.x_TB1_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB1_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB1_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n 
self.battery_TB1_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB1_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.kinnect_TB1_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.gmapp_TB1_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.floor_TB1_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.camera_TB1_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.robot_Selection_Label.setText(_translate(\"MainWindow\", \"CONFIGURATIONS\"))\n self.robot_Selection_TypeLabel.setText(_translate(\"MainWindow\", \"ROBOT TYPE:\"))\n self.robot_Selection_Type.setToolTip(_translate(\"MainWindow\", \"Select the Robot Type\"))\n self.robot_Selection_Type.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Type.setItemText(1, _translate(\"MainWindow\", \"TURTLEBOT\"))\n self.robot_Selection_Type.setItemText(2, _translate(\"MainWindow\", \"DRONE\"))\n self.robot_Selection_Role.setToolTip(_translate(\"MainWindow\", \"Select the Robot Role\"))\n self.robot_Selection_Role.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Role.setItemText(1, _translate(\"MainWindow\", \"ROLE 1\"))\n self.robot_Selection_Role.setItemText(2, _translate(\"MainWindow\", \"ROLE 2\"))\n self.robot_Selection_RoleLabel.setText(_translate(\"MainWindow\", \"ROBOT ROLE:\"))\n self.robot_Selection_TaskLabel.setText(_translate(\"MainWindow\", \"ROBOT TASK:\"))\n self.robot_Selection_Task.setToolTip(_translate(\"MainWindow\", \"Select the Robot Task\"))\n self.robot_Selection_Task.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Task.setItemText(1, _translate(\"MainWindow\", \"TASK 1\"))\n self.robot_Selection_Task.setItemText(2, _translate(\"MainWindow\", \"TASK 2\"))\n self.robot_Selection_BehaviorLabel.setText(_translate(\"MainWindow\", \"BEHAVIOR:\"))\n self.robot_Selection_Behavior.setToolTip(_translate(\"MainWindow\", \"Select an Behavior\"))\n self.robot_Selection_Behavior.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Behavior.setItemText(1, _translate(\"MainWindow\", \"TASK 1\"))\n self.robot_Selection_Behavior.setItemText(2, _translate(\"MainWindow\", \"TASK 2\"))\n self.robot_Selection_Experiment.setToolTip(_translate(\"MainWindow\", \"Select a Experiment\"))\n self.robot_Selection_Experiment.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Experiment.setItemText(1, _translate(\"MainWindow\", \"EXP. 1\"))\n self.robot_Selection_Experiment.setItemText(2, _translate(\"MainWindow\", \"EXP. 
2\"))\n self.robot_Selection_ExpLabel.setText(_translate(\"MainWindow\", \"EXPERIMENT:\"))\n self.set_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Set the Values Selected\"))\n self.set_Selection_Values.setText(_translate(\"MainWindow\", \"SET\"))\n self.reset_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Reset the Values Selected\"))\n self.reset_Selection_Values.setText(_translate(\"MainWindow\", \"R\"))\n self.robot_TB1_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 1\"))\n self.robot_TB1_Selection.setText(_translate(\"MainWindow\", \"1\"))\n self.robot_TB2_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 2\"))\n self.robot_TB2_Selection.setText(_translate(\"MainWindow\", \"2\"))\n self.robot_TB4_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 4\"))\n self.robot_TB4_Selection.setText(_translate(\"MainWindow\", \"4\"))\n self.robot_TB3_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 3\"))\n self.robot_TB3_Selection.setText(_translate(\"MainWindow\", \"3\"))\n self.run_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Run / Start the Experiment\"))\n self.run_Selection_Values.setText(_translate(\"MainWindow\", \"RUN\"))\n self.down_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Shut Down the Robot\"))\n self.down_Selection_Values.setText(_translate(\"MainWindow\", \"D\"))\n self.robot_TB2.setTitle(_translate(\"MainWindow\", \" Robot 2 (TB2)\"))\n self.configure_TB2_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB2_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB2_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB2_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB2_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB2_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB2_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB2_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB2.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB2.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB2_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB2_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB2_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB2_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB2_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.kinnect_TB2_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.gmapp_TB2_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.floor_TB2_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.camera_TB2_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.label.setToolTip(_translate(\"MainWindow\", \"ROC Version\"))\n self.label.setText(_translate(\"MainWindow\", \"Alpha v0.1\"))\n self.robot_TB3.setTitle(_translate(\"MainWindow\", \" Robot 3 (TB3)\"))\n self.configure_TB3_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB3_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB3_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB3_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB3_Show.setText(_translate(\"MainWindow\", 
\"GMAPP\"))\n self.camera_TB3_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB3_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB3_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB3.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB3.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB3_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB3_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB3_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB3_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB3_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.kinnect_TB3_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.gmapp_TB3_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.floor_TB3_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.camera_TB3_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.robot_TB4.setTitle(_translate(\"MainWindow\", \" Robot 4 (TB4)\"))\n self.configure_TB4_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB4_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB4_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB4_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB4_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB4_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB4_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB4_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB4.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB4.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB4_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB4_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB4_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB4_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB4_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.kinnect_TB4_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.gmapp_TB4_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.floor_TB4_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.camera_TB4_Screen), _translate(\"MainWindow\", \"CAMERA\"))", "def main_layout(modValues, mod_units, header=''):\n sg.theme('DarkAmber')\n divider = '---------------------------------------------------------------'\\\n '---------------------------------------------------------------'\n spacer = ' '\\\n ' '\n help_text = ' Help '\n\n tab1_layout = [[sg.Text(spacer+' '),\n sg.Button(help_text, key='bioprocess_help')],\n\n [sg.Button(modValues['side1'], key='side1'), sg.Text(' --> '),\n sg.Button(modValues['sub1'], key='sub1'), sg.Text(' --> '),\n sg.Button(modValues['proc1'], key='proc1'), sg.Text(' --> '),\n sg.Button(modValues['prod1'], key='prod1')\n ],\n\n [sg.Text(' | ')],\n\n [sg.Button(modValues['material'], key='material'), sg.Text(' --> '),\n sg.Button(modValues['substrate'], key='substrate'), sg.Text(' --> 
'),\n sg.Button(modValues['process'], key='process'), sg.Text(' --> '),\n sg.Button(modValues['product'], key='product')\n ],\n\n [sg.Text(' | ')],\n\n [sg.Button(modValues['side2'], key='side2'), sg.Text(' --> '),\n sg.Button(modValues['sub2'], key='sub2'), sg.Text(' --> '),\n sg.Button(modValues['proc2'], key='proc2'), sg.Text(' --> '),\n sg.Button(modValues['prod2'], key='prod2')\n ],\n\n [sg.Text(divider, key='changeTEXT')],\n\n # add spaces to prevent disappearing-text bug\n [sg.Text('Change ____: ', key='changeMod')],\n\n [sg.Combo(values=[''], key='changeOptions', size=(20, 1)),\n sg.Button('Apply Change')],\n\n [sg.Text('\\n\\n\\n')],\n\n [sg.Text(spacer),\n sg.Button(' UNDO ', key='undo'),\n sg.Button('Load Preset', key='load'),\n sg.Button('Save & Quit', key='exit')]\n\n ]\n\n tab2_layout = [[sg.T(spacer+' '),\n sg.Button(help_text, key='details_help')],\n\n [sg.Text('See details for:')],\n\n [sg.Combo(values=mod_units,\n key='detailOptions', size=(20, 1)),\n sg.Button('Enter', key='Detail Chosen')]]\n\n # TODO:\n # TRY TO UPDATE TAB2 TEXT INSTEAD OF USING A POPUP\n # [sg.Text('Details for _______: \\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n',\n # key='detailText')]]\n\n tab3_layout = [[sg.T(spacer+' '),\n sg.Button(help_text, key='custom_help')],\n\n [sg.Text('Select conversion type:')],\n\n [sg.Combo(values=['material', 'side', 'substrate'],\n key='customTypes', size=(20, 1)),\n sg.Button('Launch', key='customType Chosen')], ]\n\n layout = [[sg.TabGroup([[sg.Tab('Bioprocess', tab1_layout),\n sg.Tab('Details', tab2_layout),\n sg.Tab('Custom', tab3_layout)]])]]\n\n return layout", "def title( self ):\n\t\treturn \"Fix Zero Handles\"", "def rnames_button():\n\n parent.ui.label_design_image.setGeometry(QtCore.QRect(0, 0, 0, 0))\n CEToolkit.band_wraps_button = 0\n text_reader('\\design/regular_names.txt',parent.ui.textEdit_design_image)", "def corner_buttons(self):\r\n buttons = []\r\n if c.user_is_loggedin:\r\n if c.user.name in g.admins:\r\n if c.user_is_admin:\r\n buttons += [NamedButton(\"adminoff\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n else:\r\n buttons += [NamedButton(\"adminon\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n\r\n buttons += [NamedButton('submit', sr_path = not c.default_sr,\r\n nocname=not c.authorized_cname)]\r\n if c.user.safe_karma >= g.discussion_karma_to_post:\r\n buttons += [NamedButton('meetups/new', False,\r\n nocname=not c.authorized_cname)]\r\n buttons += [NamedButton(\"prefs\", False,\r\n css_class = \"pref-lang\")]\r\n buttons += [NamedButton(\"logout\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n\r\n return NavMenu(buttons, base_path = \"/\", type = \"buttons\")", "def home(color_btn, txt_btn):\n return render_template(\"ui.html\",btn_color=color_btn,btn_text=txt_btn)", "def _helpmenu_helpindex():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"Help Index\")\n self.helpindex.geometry(\"300x500\")", "def help_menu_about_activate(self, widget, data=None):\n print \"cmon\"\n button1 = gtk.Button(\"Press Me!\")\n self.fixed1.put(button1, 0, 0)\n button1.window.raise_()\n button1.show()\n button2 = gtk.Button(\"Prease Press Me!\")\n self.fixed1.put(button2, 380, 380)\n button2.show()\n button2.window.raise_()", "def Label(self) -> str:", "def renameTab(self):\n tabname = self.baseUI.tabnameBox.text() #new name text\n tabIndex = self.currentIndex() #index of current tab\n self.setTabText(tabIndex, tabname) #rename tab", "def plugin_second_label():\n return 
\"second\"", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def create_menu():", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def prepare_UI(self):", "def release_click_alphabet_button(event):\n another_image = image_abc_active[abc_index]\n a_label = buttons_abc[abc_index]\n a_label.config(image=another_image)\n a_label.image = another_image\n a_label.grid(row=ALPHA_ROW, column=ALPHA_COL, pady=2)", "def addToNavBar(des, tipe, Xrc):\n title = des.split(\"/\")[-1].replace(\".html\", \"\")\n new_nav = \"<li id=\\\"$ID$\\\"><a style=\\\"cursor: pointer\\\" onclick=\\\"document.getElementById(\\'Xdisplay\\').contentWindow.location.replace(\\'$LINK$\\'); updateExplorer(\\'$LINK$\\')\\\">$TITLE$</a></li>\\n\\t\\t\\t\\t\\t\\t\\t<!-- $XBOOKS_NAV$ -->\"\n nav = \"<!-- $XBOOKS_NAV$ -->\"\n\n with open(\"Xblog/docs/index.html\", 'r') as f:\n index = f.read()\n f.close()\n\n with open(\"Xblog/docs/index.html\", 'w') as f:\n if tipe == \"Xbook\":\n index = index.replace(nav, new_nav.replace(\"$ID$\", title).replace('$TITLE$', title).replace('$LINK$', '\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + title + '/index.html'))\n if tipe == \"Xpage\":\n index = index.replace(nav, new_nav.replace(\"$ID$\", title).replace('$TITLE$', title).replace('$LINK$', '\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + title + '.html'))\n f.write(index)\n f.close()\n ccc.success(\"adding \" + title + \" to navigation pallete\")", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def configureTags (self):\n self.window.tag_config(\"a\", foreground = \"blue\", underline=1)\n## self.window.tag_bind('a', '<Button-1>')\n self.window.tag_config('u', underline=1)\n self.window.tag_config('center', justify = CENTER)\n self.window.tag_config('right', justify = RIGHT)", "def _on_release_click(self, event):\n curItem = self.tree.focus()\n# curTerm = curItem\n parent = self.tree.parent(curItem)\n# categories = {'approved':'Approved', 'conflicts':'Conflicts', \\\n# 'suggestions':'Suggestions', 'unknown':'Unknown', \\\n# 'cldr':'CLDR',}\n categories = ['approved', 'conflicts', 'suggestions', 'unknown', \\\n 'cldr',]\n# if parent not in [approved, conflicts, suggestions, unknown, cldr]:\n if parent is '':\n #will expand/collapse\n pass\n else:\n if parent not in categories:\n curTerm = parent\n# category = categories[self.tree.parent(parent)]\n category = self.tree.parent(parent)\n else:\n curTerm = curItem\n# category = categories[parent]\n category = parent\n# messagebox.showwarning(\"index?\", \"category {}=>{}=>{}\".\\\n# format(category, curTerm, curItem))\n if category == 'approved':\n #is approved thetefore term with a single rendering only\n self.lblSource['text'] = '{}=>{}'.\\\n format(self.Source, self.tree.set(curTerm, 'Term'))\n self.lblFallback['text'] = '{}=>'.format(self.Regional)\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n pass\n elif category == 'conflicts':\n self.lblSource['text'] = '{}=>{}'.\\\n format(self.Source, self.tree.set(curTerm, 'Term'))\n self.lblFallback['text'] = '{}=>'.format(self.Regional)\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n if curTerm != curItem:\n if self.tree.item(curItem)['text'] in ['fallback', ]:\n self.lblFallback['text'] = '{}=>'.\\\n format(self.Regional, \\\n self.tree.set(curItem, 'Rendering'))\n elif self.tree.item(curItem)['text'] 
in ['rendering', ]:\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n pass\n elif category == 'suggestions':\n self.lblSource['text'] = '{}=>{}'.\\\n format(self.Source, self.tree.set(curTerm, 'Term'))\n self.lblFallback['text'] = '{}=>'.format(self.Regional)\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n if curTerm != curItem:\n\n if self.tree.item(curItem)['text'] in ['rendering', ]:\n self.preferred.set(self.tree.set(curItem, 'Rendering'))\n elif self.tree.item(curItem)['text'] in ['fallback', ]:\n self.lblFallback['text'] = '{}=>{}'.\\\n format(self.Regional, \\\n self.tree.set(curItem, 'Rendering'))\n pass\n elif category == 'unknown':\n self.lblSource['text'] = '{}=>{}'.\\\n format(self.Source, self.tree.set(curTerm, 'Term'))\n self.lblFallback['text'] = '{}=>'.format(self.Regional)\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n if curTerm != curItem:\n if self.tree.item(curItem)['text'] in ['fallback', ]:\n self.lblFallback['text'] = '{}=>{}'.\\\n format(self.Regional, \\\n self.tree.set(curItem, 'Rendering'))\n else:\n self.preferred.set(self.tree.set(curTerm, 'Rendering'))\n elif category == 'cldr':\n messagebox.showwarning(\"Selected cldr row\", \"Term {}=>{}\".\\\n format(self.tree.set(curTerm, 'Term'), \\\n self.tree.set(curTerm, 'Rendering')))\n if curTerm != curItem:\n messagebox.showwarning(\"Selected cldr row\", \"CLDR {}=>{}\".\\\n format(self.tree.set(curTerm, 'Term'), \\\n self.tree.set(curItem, 'Rendering')))\n pass\n else:\n #error condition\n messagebox.showerror('_on_release_click', \\\n 'unknown category >{}<'.format(category))\n return", "def footer_nav(self):\r\n buttons = [NamedButton(\"help\", False, nocname=True),\r\n NamedButton(\"blog\", False, nocname=True),\r\n NamedButton(\"stats\", False, nocname=True),\r\n NamedButton(\"feedback\", False),\r\n NamedButton(\"bookmarklets\", False),\r\n NamedButton(\"socialite\", False),\r\n NamedButton(\"buttons\", True),\r\n NamedButton(\"widget\", True),\r\n NamedButton(\"code\", False, nocname=True),\r\n NamedButton(\"mobile\", False, nocname=True),\r\n NamedButton(\"store\", False, nocname=True),\r\n NamedButton(\"ad_inq\", False, nocname=True),\r\n ]\r\n\r\n return NavMenu(buttons, base_path = \"/\", type = \"flatlist\")", "def update_tips(self, usageText):\r\n try:\r\n self.widgetList[13].config(text=usageText, justify=LEFT)\r\n except TclError:\r\n kT.debug_log(\"Changed menu\", sys.exc_info()[2])\r\n return", "def tabLayout(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, borderStyle: Union[AnyStr, bool]=\"\", changeCommand:\n Script=None, childArray: bool=True, childResizable: bool=True, defineTemplate:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", doubleClickCommand: Script=None,\n dragCallback: Script=None, dropCallback: Script=None, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists: bool=True,\n fullPathName: bool=True, height: Union[int, bool]=0, highlightColor:\n Union[List[float, float, float], bool]=None, horizontalScrollBarThickness: int=0,\n image: Union[AnyStr, bool]=\"\", imageVisible: bool=True, innerMarginHeight:\n Union[int, bool]=0, innerMarginWidth: Union[int, bool]=0, isObscured: bool=True,\n manage: bool=True, minChildWidth: Union[int, bool]=0, moveTab: List[int,\n int]=None, newTabCommand: Script=None, noBackground: bool=True, numberOfChildren:\n bool=True, numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, 
preSelectCommand: Script=None, preventOverride:\n bool=True, scrollable: bool=True, scrollableTabs: bool=True, selectCommand:\n Union[Script, bool]=None, selectTab: Union[AnyStr, bool]=\"\", selectTabIndex:\n Union[int, bool]=0, showNewTab: bool=True, statusBarMessage: AnyStr=\"\", tabLabel:\n Union[List[AnyStr, AnyStr], List[List[AnyStr, AnyStr]], bool]=None,\n tabLabelIndex: Union[List[int, AnyStr], List[List[int, AnyStr]], bool]=None,\n tabPosition: Union[AnyStr, bool]=\"\", tabTooltip: Union[List[AnyStr, AnyStr],\n List[List[AnyStr, AnyStr]], bool]=None, tabTooltipIndex: Union[List[int, AnyStr],\n List[List[int, AnyStr]], bool]=None, tabsClosable: bool=True, tabsVisible:\n bool=True, useTemplate: AnyStr=\"\", verticalScrollBarThickness: int=0, visible:\n bool=True, visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def main_menu_toolbar():\n\n pass", "def change_back_options_button(event):\n img_option_button_mouse_return = PhotoImage(\n file=r\"C:\\Users\\Owner\\PycharmProjects\\Module14\\buttons\\options_raised_normal.png\")\n lbl_options.config(image=img_option_button_mouse_return)\n lbl_options.image = img_option_button_mouse_return\n lbl_options.grid(row=16, column=1, columnspan=8, pady=6)", "def setupButtons(self):\n self.addLayerButton.setAccessibleName('editLayer')\n self.addLayerButton.setText('+')\n self.deleteLayerButton.setAccessibleName('editLayer')\n self.deleteLayerButton.setText('-')\n self.downButton.setArrowType(QtCore.Qt.DownArrow)\n self.upButton.setArrowType(QtCore.Qt.UpArrow)\n self.addLayerButton.setToolTip('Add a new Layer to the Job.')\n self.deleteLayerButton.setToolTip('Delete the selected Layer from the Job.')\n self.downButton.setToolTip('Move the selected Layer down in the Job.')\n self.upButton.setToolTip('Move the selected Layer up in the Job.')", "def set_navigation(self):\n self.close_button.controlUp(self.k1plus_button)\n self.k1plus_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlDown(self.close_button)\n self.k2pro_button.controlDown(self.k1pro_button)\n self.k3pro_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlUp(self.k2pro_button)\n self.k1plus_button.controlRight(self.k2pro_button)\n self.k2pro_button.controlRight(self.k3pro_button)\n self.k3pro_button.controlLeft(self.k2pro_button)\n self.k3pro_button.controlRight(self.k1pro_button)\n self.k2pro_button.controlLeft(self.k1plus_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def buttonManip(*args, icon: AnyStr=\"\", **kwargs)->None:\n pass", "def define_nav_elements(self):\n return [\n TabTip(app=self.app),\n TabIris(app=self.app),\n InstructionsTab(app=self.app),\n ]", "def update_lst(button_1):\r\n\r\n if head_lst:\r\n\r\n if check_click_legal(head_lst[-1],\r\n button_1) and button_1 not in head_lst:\r\n button_1['bg'] = 'Purple'\r\n wrd_lst.append(button_1['text'])\r\n head_lst.append(button_1)\r\n word_display.configure(text=\"\".join(wrd_lst))\r\n for i in head_lst:\r\n if i != head_lst[-1]:\r\n i['bg'] = 'Cyan'", "def iconTextButton(*args, align: Union[AnyStr, bool]=\"\", annotation: Union[AnyStr, bool]=\"\",\n backgroundColor: Union[List[float, float, float], bool]=None, command:\n Union[Script, bool]=None, commandRepeatable: bool=True, defineTemplate:\n AnyStr=\"\", disabledImage: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr,\n bool]=\"\", doubleClickCommand: Union[Script, bool]=None, dragCallback:\n Script=None, dropCallback: 
Script=None, enable: bool=True, enableBackground:\n bool=True, enableKeyboardFocus: bool=True, exists: bool=True, flat:\n bool=True, flipX: bool=True, flipY: bool=True, font: Union[AnyStr, bool]=\"\",\n fullPathName: bool=True, handleNodeDropCallback: Script=None, height:\n Union[int, bool]=0, highlightColor: Union[List[float, float, float],\n bool]=None, highlightImage: Union[AnyStr, bool]=\"\", image: Union[AnyStr,\n bool]=\"\", image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\",\n image3: Union[AnyStr, bool]=\"\", imageOverlayLabel: Union[AnyStr, bool]=\"\",\n isObscured: bool=True, label: Union[AnyStr, bool]=\"\", labelEditingCallback:\n Script=None, labelOffset: Union[int, bool]=0, ltVersion: Union[AnyStr,\n bool]=\"\", manage: bool=True, marginHeight: Union[int, bool]=0, marginWidth:\n Union[int, bool]=0, noBackground: bool=True, numberOfPopupMenus: bool=True,\n overlayLabelBackColor: Union[List[float, float, float, float], bool]=None,\n overlayLabelColor: Union[List[float, float, float], bool]=None, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride:\n bool=True, rotation: Union[float, bool]=0.0, scaleIcon: bool=True,\n selectionImage: Union[AnyStr, bool]=\"\", sourceType: Union[AnyStr, bool]=\"\",\n statusBarMessage: AnyStr=\"\", style: Union[AnyStr, bool]=\"\", useAlpha:\n bool=True, useTemplate: AnyStr=\"\", version: Union[AnyStr, bool]=\"\", visible:\n bool=True, visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def get_label(settings):", "def setLabel2(*args):", "def __init__(self, contr: widgets.base_controller,\n parent: widgets.base_widget,\n idstr: str,\n attrdct: dict,\n jsel,\n titletext: str,\n helptext: str) -> None:\n super().__init__(contr, parent, idstr, attrdct, jsel)\n self.wcstatus: wcstatus.WCstatus = contr.wcstatus\n self.addClass(\"w3-container\")\n self.addClass('switchview-cls')\n self.setAttribute('height', '80%')\n self.setAttribute('width', '100%')\n self.h1 = html.h1text(self, titletext)\n help_attrdct = {'class': 'w3-container'}\n self.helptext = html.spanhelptext(self, \"addhelptext\", help_attrdct, helptext)", "def addFrameButtons (self):\n \n self.buttonFrame = f = Tk.Frame(self.outerFrame)\n f.pack()\n \n row1 = Tk.Frame(f)\n row1.pack()\n \n # Create the back and forward buttons, cloning the images & commands of the already existing buttons.\n image = self.lt_nav_iconFrame_button.cget(\"image\")\n command = self.lt_nav_iconFrame_button.cget(\"command\")\n \n self.lt_nav_button = b = Tk.Button(row1,image=image,command=command)\n b.pack(side=\"left\",pady=2,padx=5)\n \n image = self.rt_nav_iconFrame_button.cget(\"image\")\n command = self.rt_nav_iconFrame_button.cget(\"command\")\n \n self.rt_nav_button = b = Tk.Button(row1,image=image,command=command)\n b.pack(side=\"left\",pady=2,padx=5)\n \n row2 = Tk.Frame(f)\n row2.pack()\n self.addStdButtons(row2)\n \n row3 = Tk.Frame(f)\n row3.pack()\n \n self.clear_button = b = Tk.Button(row3,text=\"Clear All\",\n width=6,command=self.clearAll)\n b.pack(side=\"left\",pady=2,padx=5)\n \n self.delete_button = b = Tk.Button(row3,text=\"Delete\",\n width=6,command=self.deleteEntry)\n b.pack(side=\"left\",pady=2,padx=5)", "def set_navigation(self):\n self.close_button.controlUp(self.wplnb1_button)\n self.wplnb1_button.controlRight(self.wplnb2_button)\n self.wplnb2_button.controlRight(self.wplnboth_button)\n self.wplnb1_button.controlDown(self.close_button)\n 
self.wplnb2_button.controlDown(self.close_button)\n self.wplnboth_button.controlDown(self.close_button)\n self.wplnb1_button.controlLeft(self.wplnboth_button) \n self.wplnb2_button.controlLeft(self.wplnb1_button) \n self.wplnboth_button.controlLeft(self.wplnb2_button)\n self.wplnboth_button.controlRight(self.wplnb1_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def addBgMarkupcallback(self):\n if self.currentSeedNameComboBox.currentIndex != self.currentSeedNameComboBox.count - 1:\n self.lastLabelIndex = self.currentSeedNameComboBox.currentIndex\n self.currentSeedNameComboBox.setCurrentIndex( self.currentSeedNameComboBox.count - 1 )\n interactionNode = slicer.app.applicationLogic().GetInteractionNode()\n interactionNode.SetCurrentInteractionMode(interactionNode.Place)", "def substratesHelp(self):\n rf = os.path.join('docs','helpButtons','prefsSubstrate.html')\n self.showHelpFile( rf )", "def create_buttons(self, frame: tk.LabelFrame) -> None:\n solve_button = tk.Button(frame, text='Solve Sudoku', font=10, command=self.solve)\n solve_button.grid(row=9, column=0, pady=(10, 0))\n examples_button = tk.Button(frame, text='Get example', font=1, command=self.example)\n examples_button.grid(row=9, column=1, pady=(10, 0))\n clear_button = tk.Button(frame, text='Clear Sudoku', font=10, command=self.clear)\n clear_button.grid(row=9, column=2, pady=(10, 0))\n return None", "def getButtonCode(*args):", "def test_issue_remove_label(self):\n pass", "def pnames_button():\n \n parent.ui.label_design_image.setGeometry(QtCore.QRect(0, 0, 0, 0))\n parent.ui.label_design_image.setPixmap(QtGui.QPixmap(\"\"))\n CEToolkit.band_wraps_button = 0\n text_reader('\\design/production_names.txt',parent.ui.textEdit_design_image)", "def set_navigation(self):\n self.close_button.controlUp(self.nos_button)\n self.nos_button.controlDown(self.madeira_button)\n self.nos_button.controlRight(self.nowo_button)\n self.nowo_button.controlDown(self.madeira_button)\n self.nowo_button.controlLeft(self.nos_button)\n self.madeira_button.controlUp(self.nos_button)\n self.madeira_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def hide_main_buttons(self):\n pass", "def test_create_custom_button(self):\n pass", "def set_navigation(self):\n self.close_button.controlUp(self.wetek_button)\n self.wetek_button.controlDown(self.close_button)\n self.wetek_button.controlRight(self.k_button)\n self.k_button.controlRight(self.khadas_button)\n self.k_button.controlDown(self.close_button)\n self.khadas_button.controlRight(self.generic_button)\n self.khadas_button.controlDown(self.close_button)\n self.generic_button.controlLeft(self.khadas_button)\n self.generic_button.controlDown(self.close_button)\n self.k_button.controlLeft(self.wetek_button)\n self.khadas_button.controlLeft(self.k_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def make_navbar_for_homepage(self):\n links = [\n \"home\", [\"Result Pages\", self._result_page_links()], \"Version\"\n ]\n if len(self.samples) > 1:\n links[1][1] += [\"Comparison\"]\n if self.publication:\n links.insert(2, \"Publication\")\n if self.gwdata is not None:\n links.append([\"Detchar\", [i for i in self.gwdata.keys()]])\n if self.notes is not None:\n links.append(\"Notes\")\n return links", "def labeling(self, tab, i, j, element):\n label = element\n label.grid(row=i, column=j) # this specifies where in the grid\n tab.grid_columnconfigure(j, weight=1) \n # this last line makes the width of the column responsive to 
change in width of the window", "def test_update_custom_button(self):\n pass", "def admin_index():\n return 'Super-seekrit admin page.'", "def myAnswerButtons(self):\r\n\r\n txt = oldAnswerButtons(self)\r\n\r\n card = self.card\r\n modelname = card._note._model[\"name\"]\r\n \r\n if not (MULTIPLE_TRIGGER_TYPE_NAME in modelname):\r\n return txt\r\n\r\n if MULTIPLE_TRIGGER_TYPE_NAME in modelname:\r\n if self.typedAnswer == None or not self.typedAnswer.isdigit():\r\n # If the user pressed \"Show Answer\" instead of one of the\r\n # answer buttons, register a wrong answer\r\n ans = 99999\r\n else:\r\n ans = int(self.typedAnswer)\r\n if (ans == 0):\r\n # correct answer, remove \"Again\" button\r\n txt = txt.replace(\"<td\",\"<!--td\",1);\r\n txt = txt.replace(\"/td>\",\"/td-->\",1);\r\n if (ans >= 1):\r\n # wrong answer: remove all buttons but the \"Again\" button\r\n # by first removing all buttons and then selectively enabling\r\n # the very first button\r\n txt = txt.replace(\"<td\",\"<!--td\");\r\n txt = txt.replace(\"/td>\",\"/td-->\");\r\n txt = txt.replace(\"<!--td\",\"<td\",1);\r\n txt = txt.replace(\"/td-->\",\"/td>\",1);\r\n else:\r\n return txt\r\n\r\n return txt", "def test_render_label(self):\n label = self.block.meta.label\n self.assertEqual(label, 'Google Calendar', 'The labels are not the same')", "def buildRigCustom(self, textEdit, uiInst):\n pass", "def reset(self):\n self.setPlainText(self.label)\n self.setEditable(False)\n if (len(str(self.label)) > 0):\n self.setTextWidth(-1)\n else:\n self.setTextWidth(CurrentTheme.VERSION_LABEL_MARGIN[0])\n \n if self.isTag:\n self.setFont(CurrentTheme.VERSION_FONT)\n else:\n self.setFont(CurrentTheme.VERSION_DESCRIPTION_FONT) \n self.updatePos()\n self.parentItem().updateWidthFromLabel()", "def Retranslate_UI(self):\r\n # Explain Of The Function #\r\n\r\n _translate = QtCore.QCoreApplication.translate\r\n self.Prose_Style_Transfer_Label.setText(_translate(\"Progress_Window\",\r\n \"<html><head/><body><p><span \"\r\n \"style=\\\" font-size:60pt; font-style:italic;\\\">PROSE<br/>\"\r\n \"STYLE <br/>TRANSFER</span></p></body></html>\"))\r\n self.Predict_Button.setText(_translate(\"Progress_Window\", \"Predict\"))\r\n self.Help_Button.setText(_translate(\"Progress_Window\", \"Help\"))\r\n self.Back_Button.setText(_translate(\"Progress_Window\", \"Back\"))\r\n self.Label_Created_By.setText(_translate(\"Progress_Window\", \"Created By Din Golan & Matan Peer\\n\" +\r\n \"Supervisor: Dvora Toledano\\n\" +\r\n \"Advice Supervisor: Zeev Vladimir Volkovich\\n\" +\r\n \"Date: 27/01/2020\"))\r\n self.Wait_Label.setText(_translate(\"Progress_Window\", \"Press On 'Predict' Button , And Wait\\n\" +\r\n \"Until The Model Will Finish His Running !\"))\r\n pass", "def __trace_ui_name(trace_name, trace_level):\n\n pass", "def redrawButtons(self):\n for self.a in range(self.btnNumber): # btnNumber = maximum number of buttons\n self.btns[self.a].configure(text=self.btnList[self.a])\n self.btns[self.a].grid()\n self.keypad = 'KeyPad'\n self.title(self.keypad)", "def retranslateUi(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n self.WindowSave.setWindowTitle(_translate(\"self.WindowSave\", \"WindowSave\"))\r\n self.label.setText(_translate(\"self.WindowSave\", self.textlabel))\r\n self.labelTxt.setText(_translate(\"self.WindowSave\", \" .txt \"))", "def editParentIndex(src, des, tipe, Xrc):\n old_title = src.split(\"/\")[-1].replace(\".html\", \"\")\n new_title = des.split(\"/\")[-1].replace(\".html\", \"\")\n index = 
des.replace(os.path.basename(des), \"index.html\")\n with open(index, 'r') as f:\n soup = BeautifulSoup(f, \"html.parser\")\n f.close()\n tag = soup.select(\"#\"+old_title)[0]\n old_tstamp = tag.td.string.lstrip().rstrip()\n new_tstamp = datetime.datetime.fromtimestamp(time.time()).strftime(\"%H:%M.%S|$MONTH$ %d %Y by Xbooks[bot]\").replace(\"$MONTH$\", chooseMonth(datetime.datetime.fromtimestamp(time.time()).strftime(\"%m\")))\n old_src = tag[\"onclick\"].split(\";\")[0].split('(')[1].split(')')[0]\n if tipe == 'Xbook':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + \"/\".join(des.split(\"/\")[2:]) + \"/index.html\\'\"\n if tipe == 'Xpage':\n new_src = '\\'\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + \"/\".join(des.split(\"/\")[2:]) + \"\\'\"\n tag.td.string = tag.td.string.replace(old_tstamp, new_tstamp)\n tag.th.string = tag.th.string.replace(old_title, new_title)\n tag[\"onclick\"] = tag[\"onclick\"].replace(old_src, new_src)\n tag[\"id\"] = new_title\n with open(index, 'w') as f:\n f.write(soup.prettify(formatter=\"html\"))\n f.close()\n ccc.success(\"updating \" + des + \" from parent index\")", "def change_color_arrow():\n global index_picker\n offset = 0\n if index_picker % 2 == 1:\n offset = 4\n for i in range(3):\n for j in range(2):\n sense.set_pixel(i+1+offset, j, [100, 100, 200])if ARROW[j][i] == 1 else sense.set_pixel(i+1+offset, j, [0, 0, 0])", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def get_tab_label(self, title, child):\n\n button = Gtk.Button(name=\"output-tab_label-button\",\n relief=2)\n grid = Gtk.Grid(name=\"output-tab_label-grid\")\n image = Gtk.Image.new_from_icon_name(\"dialog-close\", 1)\n image.set_name(\"output-tab_label-image\")\n label = Gtk.Label(name=\"output-tab_label-label\", label=title)\n grid.attach(label, 0, 0, 1, 1)\n grid.attach(button, 1, 0, 1, 1)\n button.add(image)\n button.connect(\"clicked\", self.delete_page, child)\n return grid" ]
[ "0.59079456", "0.57021827", "0.5611441", "0.5596499", "0.5464772", "0.5462852", "0.54272836", "0.5413362", "0.5381082", "0.53032327", "0.5299294", "0.5295865", "0.52915895", "0.5270703", "0.5253295", "0.5223281", "0.52090263", "0.517131", "0.5170982", "0.5129802", "0.512315", "0.51051986", "0.50990885", "0.50752556", "0.50741947", "0.50615704", "0.50527495", "0.50505316", "0.5049832", "0.5038096", "0.5027082", "0.50216216", "0.5018831", "0.50142956", "0.50067675", "0.5005814", "0.50033975", "0.49998653", "0.4997387", "0.4983434", "0.4978655", "0.49731857", "0.4965184", "0.49646688", "0.49470955", "0.49450892", "0.49379477", "0.4921907", "0.49204013", "0.49147546", "0.49089706", "0.49066994", "0.49066994", "0.4899732", "0.48809928", "0.48800078", "0.48758024", "0.48708224", "0.48674327", "0.48651183", "0.4863417", "0.48542154", "0.4853398", "0.48451072", "0.4841427", "0.48385996", "0.48349425", "0.48258027", "0.4815703", "0.48092753", "0.48066872", "0.480292", "0.4802227", "0.48009324", "0.48008054", "0.47987187", "0.47964633", "0.47855255", "0.47805575", "0.4778393", "0.47743973", "0.47694787", "0.47668484", "0.47597677", "0.4757269", "0.47556907", "0.4754223", "0.4751295", "0.47499374", "0.4746663", "0.4744686", "0.47404504", "0.473852", "0.4738248", "0.47250986", "0.4723193", "0.47223946", "0.47164208", "0.47151387", "0.4713076", "0.47112468" ]
0.0
-1
Gets a list of machine proxy objects for machines registered on the server.
def list_services(self): response = self._get() services = [] for s in response["services"]: services.append(_create_service_from_json(s, self._session, self._url_base, s["folderName"])) return services
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetMachineList(self):\n machines = self._experiment.remote\n # All Label.remote is a sublist of experiment.remote.\n for l in self._experiment.labels:\n for r in l.remote:\n assert r in machines\n return machines", "def get_machines(self):\n\n return self._machine_manager.get_machines()", "def get_proxies():\n scrapper = Scrapper(category='ALL', print_err_trace=False)\n data = scrapper.getProxies()\n\n proxies = []\n for item in data.proxies:\n proxies.append('{}:{}'.format(item.ip, item.port))\n return proxies", "def get_all_machines(self, tags=None):\n if tags:\n keys, values = tags.keys(), tags.values()\n filter_keys = map(lambda key: \"tag:\" + key, keys)\n filter_tags = dict(zip(filter_keys, values))\n res = self.conn.get_all_instances(filters=filter_tags)\n else:\n res = self.conn.get_all_instances()\n instances = [i.instances[0] for i in res]\n return [MachineDetails(inst) for inst in instances]", "def get_all(self):\n return self.proxies", "def known_nodes(self) -> List[Client]:\n return list(self.in_memory_client_registry.values())", "def working(self) -> list:\n logging.debug(\"%s start\", self.__class__.__name__)\n\n try:\n proxies_list = self.proxies_get()\n except Exception as excep:\n proxies_list = []\n logging.error(\"%s error: %s\", self.__class__.__name__, extract_error_info(excep))\n\n logging.debug(\"%s end\", self.__class__.__name__)\n return proxies_list", "def proxy_nodes(self):\n proxy_nodes = []\n for node in self.nodes:\n if 'proxy' == node.get('infos').get('type'):\n proxy_nodes.append(node)\n return proxy_nodes", "def proxies_pool(self):\n \n PROXY_URL = 'https://www.sslproxies.org/'\n\n # Retrieve the site's page. The 'with'(Python closure) is used here in order to automatically close the session\n # when done\n with requests.Session() as res:\n proxies_page = res.get(PROXY_URL)\n\n # Create a BeutifulSoup object and find the table element which consists of all proxies\n soup = BeautifulSoup(proxies_page.content, 'html.parser')\n proxies_table = soup.find(id='proxylisttable')\n\n # Go through all rows in the proxies table and store them in the right format (IP:port) in our proxies list\n proxies = []\n for row in proxies_table.tbody.find_all('tr'):\n proxies.append('{}:{}'.format(row.find_all('td')[utils['MAGIC_ZERO']].string, row.find_all('td')[MAGIC_ONE].string))\n return proxies", "def get_reachable_servers(self) -> List[Server]:\n pass", "def view_list_machines(self, user):\r\n return [machine.IP for machine in user.realm._balancer._machines]", "def get_machines(self):\n\n active_machines = self.get_active_machines()\n approved_machines = [machine for machine in TestMachine.objects.filter(approved=True) if machine.url in active_machines]\n\n machines = []\n for machine in approved_machines:\n browsers = []\n if machine.chrome:\n browsers.append({\n 'browser': 'chrome',\n 'version': machine.chrome,\n })\n if machine.internet_explorer:\n browsers.append({\n 'browser': 'internet explorer',\n 'version': machine.internet_explorer,\n })\n if machine.firefox:\n browsers.append({\n 'browser': 'firefox',\n 'version': machine.firefox,\n })\n if machine.edge:\n browsers.append({\n 'browser': 'edge',\n 'version': machine.edge,\n })\n\n platform = {\n 'os': machine.operating_system,\n 'version': machine.operating_system_ver\n }\n\n machines.append(TestMachineObj(\n browsers,\n machine.hostname,\n platform,\n machine.url,\n machine.uuid,\n ))\n\n return machines", "def machines(self) -> Iterable[dto.Machine]:\n raise errors.UnsupportedOperationError(\n 
\"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def createMachines():\n machines = []\n for i in range(0, num_of_machines):\n cur_machine = Machine(i)\n machines.append(cur_machine)\n return machines", "def proxies(self):\n\n proxies = APIConsumer.get(\"/proxies\").json()\n proxies_dict = {}\n\n for name, values in viewitems(proxies):\n # Lets create a Proxy object to hold all its data\n proxy = Proxy(**values)\n\n # Add the new proxy to the toxiproxy proxies collection\n proxies_dict.update({name: proxy})\n\n return proxies_dict", "def list(self):\n\n s = self.cloudman.list_servers()\n\n servers = self.get_list(self.cloudman.list_servers(), kind=\"vm\")\n\n result = []\n for server in servers:\n\n if 'cm' in server['metadata']:\n metadata = server['metadata']['cm']\n cm = literal_eval(metadata)\n if 'cm' in server:\n server['cm'].update(cm)\n try:\n server['ip_public'] = self.get_public_ip(server=server)\n except:\n pass\n try:\n server['ip_private'] = self.get_private_ip(server=server)\n except:\n pass\n result.append(server)\n\n return result", "def _init_proxies(self):\n url = 'https://free-proxy-list.net/'\n log.debug('Init proxies: Getting proxy list from web...')\n try:\n soup = BeautifulSoup(get(url).text, \"html5lib\")\n proxies = list()\n for tr in soup.select('#proxylisttable > tbody > tr'):\n td = tr.select('td')\n if (td[4].text == 'elite proxy') & (td[6].text == 'yes'):\n proxies.append(':'.join([td[0].text, td[1].text]))\n return proxies\n except:\n log.exception('Failed to download proxy list.')\n raise", "def proxy_scrape(self):\n print(\"Getting new live proxies\")\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:20]:\n # if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')\n [0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n # return proxies\n # proxies=[]\n print(\"Obtained proxied are as : \", proxies)\n proxy_pool = cycle(proxies)\n proxy_list = [proxy for proxy in proxies]\n return proxy_pool, proxy_list", "def api_get(self):\n sdc = DataCenter(location=self.joyent_uri, key_id=self.joyent_key_id, secret=self.joyent_secret,\n allow_agent=False, verbose=self.debug)\n servers = sdc.machines()\n return servers", "def proxy(service):\n\n return service.proxy.list()", "def list_machines(args):\n session = Session()\n finder = MachineFinder(args.finder)\n machines = finder.find_machines(session)\n print \"Machines Found: %d\" % (len(machines))\n if len(machines) > 0:\n print\n print PoolMachine.summary_header()\n print \"-\" * 80\n for machine in machines:\n print machine.summary()", "def get_all_servers(self) -> List[Server]:\n pass", "def get_available_proxies(self, conn):\n start_time = int(time.time()) - 2 * 60\n pipe = conn.pipeline(False)\n proxies = list(map(bytes.decode, proxies))\n\n return proxies", "def virtual_machines(self):\n return self._virtual_machines", "def getAllMachines():\n\n validHosts = (subprocess.getoutput(\"qconf -shgrpl\").split())\n machineList = []\n processedHGList = []\n readNodes = False\n for host in validHosts:\n hostMachineList = ((subprocess.getoutput(\"qconf -shgrp_tree \" + str(host))).split())\n for element in hostMachineList:\n if '@' not in element: # If it is not a HG name\n if element not in machineList:\n machineList.append(element) # Searching whole list for this node\n machineList.sort()\n return machineList", "def get(self, 
pages=pages):\n try:\n self.pool.map(self.proxyPage,pages)\n except urllib.error.HTTPError as e:\n self.run(e.geturl().split('/')[-1])\n return self.proxys", "def get_servers(self):\n\t\treturn self.__servers", "def remote(self) -> Remotes:\n proxy = weakref.proxy(self._remote)\n return proxy", "def remotes(self) -> \"IterableList[Remote]\":\n return Remote.list_items(self)", "def get_available_proxies(cls):\n if not cls.proxy_mesh_authenticated and PROXY_MESH_SETTINGS:\n cls.proxy_mesh_authenticated = cls._authenticate_proxy_mesh()\n\n if not cls.available_proxies and PROXY_MESH_SETTINGS:\n all_proxies = PROXY_MESH_SETTINGS.get(\"all-proxies\")\n proxies = []\n\n if not all_proxies:\n raise Exception(\"You should define the list of all-proxies\")\n\n only_proxies_from = PROXY_MESH_SETTINGS.get(\"only-proxies-from\")\n only_proxies_from = only_proxies_from.split(\",\") if only_proxies_from else None\n for proxy in all_proxies:\n if only_proxies_from:\n country = cls.get_country_from_proxy_address(proxy)\n if not country:\n continue\n\n if country not in only_proxies_from:\n continue\n\n _proxy = {\n \"http\": \"http://\" + proxy,\n \"https\": \"https://\" + proxy,\n \"no_proxy\": \"localhost,127.0.0.1\", # excludes\n }\n proxies.append(_proxy)\n cls.available_proxies = proxies\n\n return cls.available_proxies", "def servers(self):\n response = self._request(\"GET\", [ROUTE_SERVERS])\n\n return CBWParser().parse_response(CBWServer, response)", "def _get_subclient_proxies(self):\r\n associated_proxies = []\r\n try:\r\n available_subclient_proxies = self._vsaSubclientProp[\"proxies\"][\"memberServers\"]\r\n if len(available_subclient_proxies) > 0:\r\n for proxy in available_subclient_proxies:\r\n associated_proxies.append(proxy[\"client\"][\"clientName\"])\r\n except KeyError:\r\n pass\r\n return associated_proxies", "def _get_proxies() -> List[str]:\n\n response = requests.get('https://free-proxy-list.net/')\n soup = BeautifulSoup(response.text, 'html.parser')\n table = soup.findAll('div', {'class': 'table-responsive'})\n\n return [\n ':'.join([x.contents[0] for x in row.findAll('td')[:2]])\n for row in table[0].contents[0].contents[1].children\n ]", "def mmo_mongos_servers(self, mmo_connection):\n mongos_servers = []\n c = mmo_connection[\"config\"].mongos.find({}, { \"_id\": 1 } )\n for doc in c:\n hostname, port = doc[\"_id\"].split(\":\")\n mongos_servers.append({ \"hostname\": hostname, \"port\": int(port) })\n return mongos_servers", "def clients(self):\n return MappingProxyType(self._server.clients\n if self._server else {})", "def _get_guests():\n _guests = list()\n try:\n conn = libvirt.open(None)\n if conn:\n _domains = conn.listAllDomains(0)\n else:\n raise ValueError('Failed to contact hypervisor.')\n except libvirt.libvirtError as e:\n _logger.error('Failed to contact hypervisor')\n raise ValueError('Failed to contact hypervisor.')\n finally:\n conn.close()\n return _domains", "def machines():\n return [name for name, state in _status()]", "def _get_proxies(self) -> dict:\n return self._proxies.copy() if self._proxies else None", "async def get_vms(self) -> List[CachingVirtualMachine]:\n return await self._vm_fetcher.get_vms()", "def get_objects(self):\n has_more = True\n marker = None\n while has_more:\n servers = openstack_clients.get_novaclient().servers.list(\n limit=LIST_LIMIT,\n search_opts={'all_tenants': True},\n marker=marker\n )\n\n if not servers:\n # Definitely no more; break straight away\n break\n\n # servers.list always returns a list so we can grab the last id\n 
has_more = len(servers) == LIST_LIMIT\n marker = servers[-1].id\n\n for server in servers:\n yield server", "async def discover(self):\n\n def get_discovered_servers(discovery):\n servers = discovery.all()\n discovery.stop()\n return servers\n\n discovery = RoonDiscovery(None)\n servers = await self._hass.async_add_executor_job(\n get_discovered_servers, discovery\n )\n _LOGGER.debug(\"Servers = %s\", servers)\n return servers", "def populate(self, proxies):\n\n populated_proxies = []\n\n for proxy in proxies:\n existing = self.get_proxy(proxy[\"name\"])\n\n if existing is not None and (existing.upstream != proxy[\"upstream\"] or existing.listen != proxy[\"listen\"]):\n self.destroy(existing)\n existing = None\n\n if existing is None:\n proxy_instance = self.create(**proxy)\n populated_proxies.append(proxy_instance)\n\n return populated_proxies", "def list_machines(request):\n auth_context = auth_context_from_request(request)\n cloud_id = request.matchdict['cloud']\n # SEC get filtered resources based on auth_context\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner,\n id=cloud_id, deleted=None)\n except Cloud.DoesNotExist:\n raise NotFoundError('Cloud does not exist')\n\n machines = methods.filter_list_machines(auth_context, cloud_id)\n\n if cloud.machine_count != len(machines):\n try:\n tasks.update_machine_count.delay(\n auth_context.owner.id, cloud_id, len(machines))\n except Exception as e:\n log.error('Cannot update machine count for user %s: %r' %\n (auth_context.owner.id, e))\n\n return machines", "def objs(self):\n return (\n Nameserver.objects.create(\n domain=self.r, server='ns2.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server='ns5.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server=u'ns3.moot.ru'),\n Nameserver.objects.create(\n domain=self.b_f_r, server='n1.moot.ru'),\n Nameserver.objects.create(\n domain=self.b_f_r, server='ns2.moot.ru'),\n Nameserver.objects.create(\n domain=self.r, server='asdf.asdf'),\n )", "def all_machines():\n return sorted(MACHINES, key=str)", "def subclient_proxy(self):\r\n return self._get_subclient_proxies()", "def get_active_proxies(max_proxy=20):\n proxies = get_free_proxies()\n\n pool = ThreadPool(50)\n active_proxies = pool.map(check_proxy, proxies)\n active_proxies = [x for x in active_proxies if x is not None]\n\n if not active_proxies:\n discord.send_message(\"No proxy to use\")\n raise Exception(\"No proxy to use\")\n\n return active_proxies[:max_proxy]", "def get_all_vms(self):\n available_servers = self.connection.compute.servers()\n if available_servers:\n vm_names = [server.name for server in available_servers]\n return vm_names\n else:\n return []", "def get_processes(self):\n processes={}\n for (server_ip, server_port) in self.hosts:\n try:\n server = xmlrpclib.ServerProxy(\"http://%s:%d\"%(server_ip, server_port))\n uid = server.get_id()\n if uid != self.uid:\n processes[uid] = server\n except socket.error:\n pass\n return processes", "def get_proxies():\n url = 'https://hidemy.name/ru/loginx'\n url1 = 'https://hidemy.name/api/proxylist.txt?out=plain&lang=ru'\n data = {'c':'169496407732367'}\n s = requests.session()\n s.get(url1)\n s.post(url, data=data)\n res = s.get(url1)\n result = res.text.split('\\r\\n')\n return result", "def getMonitoringHosts(self):\r\n return self.monitoringClients.values()", "def servers(self):\n return self._servers", "def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, 
headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer", "def list_servers(self, request, paginate):\n raise NotImplementedError", "def get_virtual_machines(_id):\n url = '/%s' % str(_id)\n return atmosphere.tools.create_req(url=url)", "def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def getHosts(self):\n raise \"not implemented\"", "def getServerInterfaces(self):\n return self.servers", "def get_dns_servers(self):\n self.__not_implemented()", "def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers", "def get_servers(self):\n url = '%s/servers/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['servers']\n else:\n LOG.error('Get servers failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def list_machines(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n\n if verbose:\n attributes = self.engine.all_attributes()\n else:\n attributes = [\"sAMAccountName\", \"objectClass\"]\n\n self.display(self.engine.query(self.engine.COMPUTERS_FILTER(), attributes), verbose, specify_group=False)", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def get_connections(self, name):\n cls, pending, connected = self._proxies[name]\n return list(connected)", "def vms(self) -> List[CachingVirtualMachine]:\n return self._vm_fetcher.vms", "def extract_proxies(text: AnyStr) -> Iterable[Proxy]:\n results = set()\n matches = findall(MATCH_PROXY, text)\n for ip, port in matches:\n proxy = Proxy.from_string(f\"https://{ip}:{port}/\")\n if proxy is not None:\n results.add(proxy)\n\n return results", "def get_hosts(self):\n\n raise NotImplementedError", "def list_computers(self, kwargs):\n resolve = \"resolve\" in kwargs and kwargs[\"resolve\"]\n dns = kwargs.get(\"dns\", \"\")\n dc = kwargs.get(\"dc\", False)\n\n hostnames = []\n if not dc:\n results = self.engine.query(self.engine.COMPUTERS_FILTER(), [\"name\"])\n else:\n results = self.engine.query(self.engine.DC_FILTER(), [\"name\"])\n for result in results:\n if \"name\" in result: # ugly\n computer_name = result[\"name\"]\n else:\n computer_name = result[:-1] # removing trailing $ sign\n\n hostnames.append(f\"{computer_name}.{self.engine.fqdn}\")\n # print only if resolution was not mandated\n if not resolve:\n 
print(f\"{computer_name}.{self.engine.fqdn}\")\n # do the resolution\n if resolve:\n for computer in utils_resolve(hostnames, dns):\n print(\"{addr:20} {name}\".format(addr=computer[\"address\"], name=computer[\"hostname\"]))", "def mmo_shard_servers(self, mmo_connection):\n shard_servers = []\n c = mmo_connection[\"config\"].shards.find({})\n for doc in c:\n shard = doc[\"_id\"]\n for host in doc[\"host\"].split(shard + \"/\", 1)[1].split(\",\"):\n hostname, port = host.split(\":\")\n shard_servers.append({ \"shard\": shard, \"hostname\": hostname, \"port\": int(port) })\n return shard_servers", "def get_memcached_hosts():\n cache_info = settings.CACHES[DEFAULT_FORWARD_CACHE_ALIAS]\n backend = cache_info['BACKEND']\n locations = cache_info.get('LOCATION', [])\n\n if 'memcached' not in backend or not locations:\n locations = []\n elif not isinstance(locations, list):\n locations = [locations]\n\n return locations", "def clients(self):\n items = []\n for elem in self.query('/clients'):\n baseurl = 'http://%s:%s' % (elem.attrib['address'],\n elem.attrib['port'])\n items.append(PlexClient(baseurl, server=self, data=elem))\n return items", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def get_all_virtual_machines(app_id=None):\n url = ''\n if app_id is not None:\n url += 'appliance_id=%s' % str(app_id)\n if len(url) > 0:\n url = '?' + url\n return atmosphere.tools.create_req(url=url)", "def getMachines(tasks):\n uniqueMachines = []\n for t in tasks:\n if t.machine not in uniqueMachines:\n uniqueMachines.append(t.machine)\n return uniqueMachines", "def return_proxy(self):\n\n check_server()\n url='{url}/proxy_return'.format(url=config.SERVER_URL)\n proxy_ret= [x.raw_data for x in self.proxy_pool]\n proxy_str=''\n\n for item in proxy_ret:\n proxy_str=proxy_str+item\n data={\n 'data':proxy_str\n }\n\n data=parse.urlencode(data).encode('utf-8')\n\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n err_str='error:client->return_proxy:unable to ' \\\n 'connect to server'\n info_manager(err_str,type='KEY')\n return\n\n if 'return success' in res:\n print('Success: return proxy to server')\n return\n else:\n err_str='error:client->return_proxy:'+res\n info_manager(err_str,type='KEY')\n # raise ConnectionError('Unable to return proxy')\n return", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "async def register_rpc_proxies(self):\n for rpc_name in self.rpc_proxy_list:\n logger.debug('Registering RPC to Proxy: {}'.format(rpc_name))\n\n class RPCProxy:\n\n def __init__(self, local_session, rpc_name):\n self._local_session = local_session\n self._rpc_name = rpc_name\n\n async def __call__(self, *args, **kwargs):\n logger.debug('Proxying RPC {}, with args {}, kwargs {}'.format(self._rpc_name, args, kwargs))\n return await self._local_session.call(self._rpc_name, *args, **kwargs)\n\n await self.remote_session.register(RPCProxy(self.local_session, rpc_name), rpc_name)", "def get_clients(self):\n clis = []\n for c in self._clients:\n clis.append(c.get_address())\n return clis", "def get_clients(self):\n clis = []\n for c in self._clients:\n clis.append(c.get_address())\n return clis", "def 
get_queryset(self):\n query_set = super(NetworkProxyManager, self).get_queryset()\n return query_set", "def get_public_proxies(self, continent=\"\"):\n logging.debug(\"Filling proxy bag\")\n\n try:\n payload = {}\n if continent:\n payload = {\n \"continent\": continent,\n }\n response = self._make_internal(\"proxies\", payload)\n except errors.NoRemoteServicesConnection:\n logging.error(\"Unable to connect to Bad-Actor.Services\")\n raise errors.NoRemoteServicesConnection\n\n try:\n self.proxy_bag = response.json()[\"objects\"]\n except Exception:\n logging.error(\"ERROR: Could not get proxies. %s\" % response.text)\n return False\n\n logging.debug(\"Fetched %s proxies\" % len(self.proxy_bag))\n\n # Shuffle the proxies so concurrent instances of CarpetBag wont use the same proxy\n shuffle(self.proxy_bag)\n\n return self.proxy_bag", "def proxy_addresses(self):\n if \"proxyAddresses\" in self._prop_dict:\n return self._prop_dict[\"proxyAddresses\"]\n else:\n return None", "def address(self):\n addrlist = []\n for s in self.srv_socks:\n addrlist.append(s.getsockname())\n return addrlist", "def get(self):\r\n args = self.get_args.parse_args()\r\n num_rows = args.get(\"rows\") or 100\r\n query = g.db.query(Machine)\r\n if args.get(\"realm\", None) not in (\"aws\", \"local\"):\r\n abort(http_client.BAD_REQUEST, description=\"realm must be 'aws' or 'local'\")\r\n\r\n if args[\"realm\"] == \"local\":\r\n query = query.filter(Machine.realm == \"local\",\r\n Machine.instance_name == args[\"instance_name\"])\r\n else:\r\n missing = []\r\n for param in (\"instance_id\", \"instance_type\", \"placement\", \"public_ip\"):\r\n if not args[param]:\r\n missing.append(param)\r\n if missing:\r\n abort(http_client.BAD_REQUEST,\r\n description=\"missing required parameters: %s\" % \", \".join(missing))\r\n query = query.filter(Machine.realm == args[\"realm\"],\r\n Machine.instance_name == args[\"instance_name\"],\r\n Machine.instance_id == args[\"instance_id\"],\r\n Machine.instance_type == args[\"instance_type\"],\r\n Machine.placement == args[\"placement\"],\r\n Machine.public_ip == args[\"public_ip\"],\r\n )\r\n query = query.order_by(-Machine.machine_id)\r\n query = query.limit(num_rows)\r\n rows = query.all()\r\n ret = []\r\n for row in rows:\r\n record = row.as_dict()\r\n record[\"url\"] = url_for(\"machines.entry\", machine_id=row.machine_id, _external=True)\r\n ret.append(record)\r\n\r\n return jsonify(ret)", "def get_vms(self, user=None, count=None):\n crit = dict()\n if count is not None:\n crit['count'] = count\n s = self._NDL_API('getvms', crit, user)\n if len(s) == 0:\n return []\n ips = s.split(',')\n # if the localhost's IP is in the list, move it to the front\n localips = getmyips()\n for i in range(len(ips)):\n if ips[i] in localips:\n x = ips[i]\n del ips[i]\n return [ x, ] + ips\n # otherwise order does not matter?\n return ips", "def get_proxies():\n # url = 'http://nntime.com//'\n url = 'https://free-proxy-list.net/'\n\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr'):\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n return proxies", "def list_instances(self):\n instances = []\n try:\n pages = self.compute.virtual_machines.list(\n CONF.azure.resource_group)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.InstanceListFailure(reason=six.text_type(e))\n raise ex\n else:\n 
if pages:\n for i in pages:\n instances.append(i.name)\n return instances", "def list_instances(self):\n nodes = self._driver.list_nodes()\n return [[n.name, n.state, n.public_ips] for n in nodes]", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))", "def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]", "def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()", "def GetListHostsObjects(self):\n rdata = []\n total = 0\n with open(self.ServerInfo['MgmtObjects'].MGMTServerFilePathNetObjects) as f:\n data = json.load(f)\n total = data['total']\n if total == 0:\n return None\n for i in range(total):\n rdata.append([data['objects'][i]['name'],data['objects'][i]['ipv4-address']])\n return rdata", "def get_servers(self):\n json_scheme = self.gen_def_json_scheme('GetServers')\n json_obj = self.call_method_post(method='GetServers', json_scheme=json_scheme)\n self.json_servers = json_obj\n # if this method is called I assume that i must re-read the data\n # so i reinitialize the vmlist\n self.vmlist = VMList()\n # getting all instanced IP in case the list is empty\n if len(self.iplist) <= 0:\n self.get_ip()\n for elem in dict(json_obj)[\"Value\"]:\n if elem['HypervisorType'] is 4:\n s = Smart(interface=self, sid=elem['ServerId'])\n else:\n s = Pro(interface=self, sid=elem['ServerId'])\n s.vm_name = elem['Name']\n s.cpu_qty = elem['CPUQuantity']\n s.ram_qty = elem['RAMQuantity']\n s.status = elem['ServerStatus']\n s.datacenter_id = elem['DatacenterId']\n s.wcf_baseurl = self.wcf_baseurl\n s.auth = self.auth\n s.hd_qty = elem['HDQuantity']\n s.hd_total_size = elem['HDTotalSize']\n if elem['HypervisorType'] is 4:\n ssd = self.get_server_detail(elem['ServerId'])\n try:\n s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])\n except TypeError:\n s.ip_addr = 'Not retrieved.'\n else:\n s.ip_addr = []\n for ip in self.iplist:\n if ip.serverid == s.sid:\n s.ip_addr = ip\n self.vmlist.append(s)\n return True if json_obj['Success'] is True else False", "def getMonitors(self):\n return [self.monitor]", "def remotes():", "def find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))", "def virtual_machines(self) -> Sequence['outputs.SubResourceWithColocationStatusResponse']:\n return pulumi.get(self, \"virtual_machines\")", "async def _free_proxy_list_parser(content):\n page = lxml.html.fromstring(content)\n proxies = []\n for row in page.cssselect('tbody tr'):\n tds = row.cssselect('td')\n if 'yes' in tds[6].text_content():\n proxies.append('https://%s:%s' % (tds[0].text_content(), tds[1].text_content()))\n else:\n proxies.append('http://%s:%s' % (tds[0].text_content(), tds[1].text_content()))\n\n return proxies", "def VirtualMachines(self):\n if not self._vms:\n vms = self._get_objects(vim.VirtualMachine)\n for vm in vms:\n self._vms[vm.name] = VirtualMachine(vm, self.service_instance)\n return self._vms", "def proxy_scrape():\r\n url = r'https://free-proxy-list.net'\r\n\r\n r = requests.get(url, timeout=7)\r\n soup = BeautifulSoup(r.text, 'lxml')\r\n\r\n # Select the table excluding the first and the last row\r\n table = soup.select(\"#proxylisttable tr\")\r\n table = table[1:-1]\r\n proxy_set = set()\r\n\r\n for row in table:\r\n # Go through each row, merge IP addresses with corresponding ports and add them to a set\r\n ip_addr = 
row.select_one('td').text\r\n port = row.select_one('td + td').text\r\n proxy = f'{ip_addr}:{port}'\r\n proxy_set.add(proxy)\r\n\r\n print(f'Total amount of proxies: {len(proxy_set)}')\r\n\r\n return proxy_set" ]
[ "0.6875486", "0.6869549", "0.6537448", "0.6433981", "0.6399903", "0.637255", "0.6364685", "0.6354067", "0.63194156", "0.6291957", "0.62829745", "0.6226571", "0.6222153", "0.6200053", "0.6157794", "0.6109501", "0.6103711", "0.60903883", "0.6048954", "0.60453343", "0.59695596", "0.59610933", "0.59367126", "0.59102464", "0.5899385", "0.58364314", "0.5831132", "0.5819415", "0.5785164", "0.57181937", "0.5692978", "0.5677293", "0.56597805", "0.56306356", "0.56291085", "0.561791", "0.5603244", "0.56019413", "0.5591476", "0.55874985", "0.55828047", "0.55484855", "0.55415523", "0.55268824", "0.5521663", "0.5516832", "0.5477919", "0.54678446", "0.545782", "0.5451915", "0.5437969", "0.54347557", "0.54324895", "0.54301125", "0.5419659", "0.5417846", "0.5406525", "0.5390733", "0.5384926", "0.5376482", "0.5355384", "0.5347645", "0.5346162", "0.53384465", "0.5334108", "0.5328971", "0.53283405", "0.5319805", "0.5312564", "0.53045595", "0.5293661", "0.52921265", "0.52883714", "0.52854115", "0.52829516", "0.52748024", "0.526496", "0.52614385", "0.5252405", "0.5252405", "0.52486116", "0.5247414", "0.5246628", "0.52420163", "0.52387214", "0.523729", "0.523076", "0.5229742", "0.5225104", "0.5221451", "0.52161676", "0.52090645", "0.5187209", "0.51856214", "0.5177661", "0.514511", "0.5106971", "0.5105371", "0.50991285", "0.50974184", "0.5095123" ]
0.0
-1
Helper to return all possible splits. This function returns all the possible splits on the given attribute in the provided dataframe.
def get_possible_splits(df, attribute):
    ds = df.loc[:, attribute]

    # First sort the values
    ds = ds.sort_values().drop_duplicates()

    # Compute averages of consecutive values
    ds = ds.rolling(2).sum().divide(2)
    splits = ds[1:].tolist()

    # return the possible splits
    return splits
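For a quick sense of what this returns, a minimal usage sketch follows. The DataFrame, its column names, and the expected output are illustrative assumptions, not part of the record above; the candidate split points for a numeric attribute are simply the midpoints between consecutive distinct values.

import pandas as pd

# Toy data: attribute "x" has distinct sorted values 1.0, 2.0, 3.0 (names assumed)
df = pd.DataFrame({"x": [1.0, 3.0, 2.0, 3.0], "label": [0, 0, 1, 1]})

# Midpoints between consecutive distinct values of "x"
print(get_possible_splits(df, "x"))  # -> [1.5, 2.5]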
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitBy(data, attribute_id):\n \n col = getColumn(data, attribute_id)\n values = set(col)\n split_data = [] \n for i in values:\n subset = [row for row in data if row[attribute_id] == i]\n split_data.append(subset)\n \n return split_data", "def generate_splits( records, index ):\n splits = []\n is_numerical = feature_is_numerical( records, index )\n if is_numerical:\n splits = generate_numerical_splits( records, index )\n else:\n splits = generate_category_splits( records, index )\n\n return splits", "def potential_splits(self, potential_xj):\r\n \r\n self.cur.execute(\"SELECT DISTINCT \" + potential_xj + \" FROM \" + self.table_name + \";\")\r\n potential_splits = [ii[0] for ii in self.cur.fetchall()]\r\n return potential_splits", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def find_splits(col):\n unique = np.unique(sorted(col))\n splits = np.array([np.mean([unique[i], unique[i+1]]) for i in range(len(unique)-1)])\n return splits", "def split(df, stratify_by=None):\n \n if stratify_by == None:\n train, test = train_test_split(df, test_size=.3, random_state=123)\n train, validate = train_test_split(df, test_size=.3, random_state=123)\n else:\n train, test = train_test_split(df, test_size=.2, random_state=123, stratify=df[stratify_by])\n train, validate = train_test_split(df, test_size=.3, random_state=123, stratify=train[stratify_by])\n \n return train, validate, test", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def prepare_stops_to_request(df: pd.DataFrame) -> list:\n return [split_df(df, i, i + 100) for i in range(0, len(df), 100)]", "def get_splits_list(\n self,\n columns: list) -> np.ndarray:\n\n splits = []\n\n for idx, (left, right) in enumerate(zip(self.ts_df['left'], self.ts_df['right'])):\n res = self.main_df.loc[(self.main_df['Timestamp'] >= left) & (self.main_df['Timestamp'] <= right)]\n\n split = np.asarray(res[columns].values.tolist())\n\n splits.append(split)\n\n return np.asarray(splits, dtype=object)", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train 
= df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def split_dataframe(df, split_elements_list):\n y = df.filter(split_elements_list)\n x = df.drop(split_elements_list, axis=1)\n\n return x, y", "def split_phases(df):\n return(\n tuple([df.groupby('Phase').get_group(p) for p in df.Phase.unique()])\n )", "async def _get_all_splits(self):\n\n # Grabs values from sheet\n col_name = self.sheet.col_values(1)\n col_split = self.sheet.col_values(2)\n\n # Creates list of tuple pairs with (name, split amount)\n # If split cannot be made proper integer, returns None for ammount\n values = []\n for i in range(len(col_name)):\n name = col_name[i]\n try:\n amount = int(col_split[i].replace(\",\", \"\").replace(\"$\", \"\"))\n except (ValueError, IndexError):\n amount = None\n values.append((name, amount))\n return values", "def get_splits(self) -> Dict[str, np.array]:\n\n return self.splits", "def making_dataset_list_val(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n val_data_list = []\n for i in range(split_num):\n val_data_list.append(data[data['separate_num'] == i])\n for i in range(split_num):\n val_data_list[i] = val_data_list[i].drop(['separate_num'], axis = 1)\n return val_data_list", "def split_into_components(X_df, G):\n components = list(networkx.components.connected_components(G))\n\n X_splits = [X_df.filter(items=component) for component in components]\n subgraphs = [G.subgraph(component) for component in components]\n return X_splits, subgraphs", "def getSplit(self):\n b_index, b_value, b_score, b_groups = 999, 999, 999, None\n for j in range(len(self[0]) - 1):\n for i in range(len(self)):\n groups = self.splitAttribute(j, self[i][j]) # lit, big\n gini = self.giniIndex(groups)\n if gini < b_score and (j, \"%.1f\" % self[i][j]) not in self.atr:\n b_index, b_value, b_score, b_groups = j, self[i][\n j], gini, groups\n return b_index, b_value, b_groups, b_score", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def generate_numerical_splits( records, index ):\n possible = {}\n for r in records:\n possible[ r.features[index] ] = True\n possible = possible.keys()\n splits = []\n\n for i in xrange(0, len(possible)-1):\n s = Split(is_numerical=True)\n s.set_numerical_range( possible[i] )\n s.place( records, index )\n splits.append( s )\n\n return splits", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 
80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]", "def getKSplits(df, n_splits, seed = None):\n\n result = []\n\n # None random seed is same as not setting it\n df_shuffled = df.sample(len(df), random_state = seed)\n\n fold_size = int(len(df) / n_splits)\n\n for i in range(n_splits):\n if i == n_splits - 1: # last iteration\n df_fold = df_shuffled[fold_size * (i): len(df)] # gets remainder\n else:\n df_fold = df_shuffled[fold_size * (i):fold_size * (i + 1) ] # python starts indexing at 0\n result.append(df_fold)\n\n return result", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = indices[test_indices]\n yield train_indices, test_indices", "def partition_instances(instances, split_attribute, attribute_domains):\n # this is a group by split_attribute's domain, not by\n # the values of this attribute in instances\n # example: if split_attribute is \"level\"\n attribute_domain = attribute_domains[split_attribute] # [\"Senior\", \"Mid\", \"Junior\"]\n # Build a dictionary\n partitions = {} # key (attribute value): value (list of instances with this attribute value)\n # For loop through attributes in dictionary\n for attribute_value in attribute_domain:\n partitions[attribute_value] = []\n for instance in instances:\n index = int(split_attribute[3:])\n if instance[index] == attribute_value:\n partitions[attribute_value].append(instance)\n return partitions", "def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)", "def obtain_df_splits(data_csv):\n data_df = read_csv(data_csv)\n # Obtain data split array mapping data rows to split type\n # 0-train, 1-validation, 2-test\n splitter = get_splitter(\"random\")\n train_df, val_df, test_df = splitter.split(data_df, LocalTestBackend())\n return test_df, train_df, val_df", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. 
class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def __dividePandas(df, column, value):\n if isinstance(value, int) or isinstance(value, float): #Check if value is a #\n #Divide the rows into two sets and return them\n set1 = df[df[column] >= value] #Observations greater than or equal to value\n set2 = df[df[column] < value] #Observations less than value are in set2\n else:\n set1 = df[df[column] == value] #Observations equal to value are in set 1\n set2 = df[df[column] != value] #Observations not equal to value are in set2 \n return (set1, set2)", "def get_splits(datastore, query, num_splits, partition=None):\n\n # Validate that the number of splits is not out of bounds.\n if num_splits < 1:\n raise ValueError('The number of splits must be greater than 0.')\n\n if num_splits == 1:\n return [query]\n\n _validate_query(query)\n\n splits = []\n scatter_keys = _get_scatter_keys(datastore, query, num_splits, partition)\n last_key = None\n for next_key in _get_split_key(scatter_keys, num_splits):\n splits.append(_create_split(last_key, next_key, query))\n last_key = next_key\n\n splits.append(_create_split(last_key, None, query))\n return splits", "def _find_split(self, X, y, n_features):\r\n splits_info = []\r\n\r\n # Select features to consider\r\n features = self._feature_selection.get_features(n_features, self._feature_prob)\r\n\r\n # Get candidate splits\r\n for feature_id in features:\r\n for split_value in compute_split_values(X[:, feature_id]):\r\n splits_info.append(\r\n compute_split_info(self._split_criterion, X, y, feature_id, split_value, self._min_samples_leaf))\r\n\r\n splits = []\r\n for split_info in splits_info:\r\n if split_info is not None:\r\n gain, feature_id, split_value = split_info\r\n split = Split(feature_id, value=split_value, gain=gain)\r\n splits.append(split)\r\n else:\r\n continue\r\n\r\n selected_split = self._split_chooser.get_split(splits)\r\n return selected_split", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def get_splits(self):\n\t\treturn self.k", "def split(self, X):", "def generate_category_splits( records, index ):\n possible = {}\n for r in records:\n possible[ r.features[index] ] = True\n possible = 
possible.keys()\n splits = []\n for choice in generate_category_choice( possible ):\n choice.place( records, index )\n splits.append( choice )\n return splits", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", "def partition_by(inputs, attribute):\n groups = defaultdict(list)\n for input in inputs:\n key = input[0][attribute]\n groups[key].append(input)\n return groups", "def split(self):\n return self.dataset_split", "def __divideset(rows, column, value):\n split_function = None #Initialize a variable split function.\n if isinstance(value, int) or isinstance(value, float): #Check if value is a number\n #True = the observation's value >= to the splitting criteria. False otherwise\n split_function = lambda row: row[column] >= value\n else:\n #If value is a string, True is where the observation's value == the criteria\n split_function = lambda row:row[column] == value\n \n #Divide the rows into two sets and return them\n set1 = [row for row in rows if split_function(row)]\n set2 = [row for row in rows if not split_function(row)]\n return (set1, set2)", "def split ( self, y, X = None ):\n # Make sure y is an array\n y = np.array ( y ) if isinstance ( y, list ) else y\n\n # Groupby y and add integer indices.\n df_with_split = (\n pd.DataFrame ( { \"y\": y, \"index\": np.arange ( len ( y ) ) } )\n .groupby ( \"y\" ) [ \"index\" ]\n .apply ( self.add_split_col ) # Add col for split for instance\n )\n\n # For each fold, get train and test indices (based on col for split)\n for cv_split in np.arange ( self.number_of_folds - 1, -1, -1 ):\n train_bool = df_with_split [ \"split\" ] != cv_split\n test_bool = ~ train_bool\n # Yield index values of not cv_split and cv_split for train, test\n yield df_with_split [ \"index\" ].values [ train_bool.values ], df_with_split [\n \"index\"\n ].values [ test_bool.values ]\n # End split()", "def split(self, x):\r\n new_beams = np.array([])\r\n for bar in self.bar_elements:\r\n new_beams = np.concatenate((new_beams, bar.split(x)))\r\n return BeamElements(new_beams)", "def making_dataset_list_train(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n train_data_list = []\n for i in range(split_num):\n train_data_list.append(data[data['separate_num'] != i])\n for i in range(split_num):\n train_data_list[i] = train_data_list[i].drop(['separate_num'], axis = 1)\n return train_data_list", "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : 
(i+1)*row_limit] for i in range(seg_num)]\n\n return segments", "def split_data(data: pd.DataFrame, parameters: Dict) -> List:\n # print(columns)\n X = data.drop(\"CHURNRISK\", axis=1).values\n y = data[\"CHURNRISK\"].values\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=parameters[\"test_size\"], random_state=parameters[\"random_state\"]\n )\n\n columns = data.columns\n columns = columns.drop([\"CHURNRISK\"])\n # 教師データとテストデータに分割される際にNumPy配列に変換されてしまっているためDataFrameに戻す\n X_train_df = pd.DataFrame(X_train)\n X_train_df.columns = columns\n X_test_df = pd.DataFrame(X_test)\n X_test_df.columns = columns\n y_train_df = pd.DataFrame(y_train)\n y_train_df.columns = [\"CHURNRISK\"]\n y_test_df = pd.DataFrame(y_test)\n y_test_df.columns = [\"CHURNRISK\"]\n\n return X_train_df, X_test_df, y_train_df, y_test_df", "def partition_mondrian(df, k, bias, relational_weight, quasi_identifiers):\n scale = __get_attribute_spans(df, df.index, quasi_identifiers)\n finished_partitions = []\n partitions = [df.index]\n partition_split_statistics = {attribute: 0 for attribute in quasi_identifiers}\n while partitions:\n partition = partitions.pop(0)\n if len(partition) >= 2 * k:\n logger.debug(\"Working on partition with length %d\", len(partition))\n spans = __get_attribute_spans(df, partition, quasi_identifiers, scale)\n for column, _ in __mondrian_split_priority(spans, bias, relational_weight):\n lp, rp = __split_partition(df[column][partition])\n if not __is_k_anonymous(lp, k) or not __is_k_anonymous(rp, k):\n continue\n if lp.equals(rp):\n break\n else:\n logger.debug(\"Splitting partition on attribute %s into two partitions with size %d and %d\", column, len(lp), len(rp))\n partition_split_statistics[column] += 1\n partitions.extend((lp, rp))\n break\n else:\n finished_partitions.append(partition)\n else:\n finished_partitions.append(partition)\n logger.debug(\"%d partitions remaining\", len(partitions))\n return finished_partitions, partition_split_statistics", "def partition_by(inputs, attribute):\n groups = defaultdict(list)\n for input in inputs:\n key = input[0][attribute] # get the value of the specified attribute\n groups[key].append(input) # then add this input to the correct list\n return groups", "def split(self, by, return_dict=True):\n groups = np.unique(by)\n ix = [np.where(by == groups[i])[0] for i in range(len(groups))]\n if return_dict:\n output = {k:self.subset(i) for k,i in zip(groups, ix)}\n else:\n output = [self.subset(i) for i in ix]\n return output", "def calcGiniSplitByColumn(self, data, structure, colIName):\n colIndex, giniSplit = structure[colIName]['index'], 0\n for value in structure[colIName][\"values\"]:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data)\n giniSplit += self.calcDataGini(newData, structure) * p\n return round(giniSplit, 3)", "def one2all(df, val_size=2, seed=0xBadCafe):\n folds = sorted(df.fold.unique())\n split = []\n for f in folds:\n idx_b = df[df.fold == f].index.tolist()\n test_ids = df[df.fold != f].index.tolist()\n train_ids, val_ids = train_test_split(idx_b, test_size=val_size, random_state=seed)\n split.append([train_ids, val_ids, test_ids])\n return split", "def get_n_splits(self):\n pass", "def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in 
self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split", "def df2chunkset(df, chunktag='chunktag', guesstag='guesstag'):\n go, ge = set(), set()\n if df.iloc[0][chunktag][0] not in 'BOS':\n raise ValueError('Invalid chunktag on first token.')\n if df.iloc[0][guesstag][0] not in 'BOS':\n raise ValueError('Invalid guesstag on first token.')\n chunk_go = [(0, df.iloc[0][chunktag])]\n chunk_ge = [(0, df.iloc[0][guesstag])]\n for tid, r in df.iloc[1:].iterrows():\n if r[chunktag][0] in 'BOS':\n # start new\n go.add(tuple(chunk_go))\n chunk_go = [(tid, r[chunktag])]\n else:\n # continue chunk\n chunk_go.append((tid, r[chunktag]))\n if r.guesstag[0] in 'BOS':\n # start new\n ge.add(tuple(chunk_ge))\n chunk_ge = [(tid, r[guesstag])]\n else:\n # continue chunk\n chunk_ge.append((tid, r[guesstag]))\n\n if chunk_ge:\n ge.add(tuple(chunk_ge))\n if chunk_go:\n go.add(tuple(chunk_go))\n\n return go, ge", "def split_data(df):\n # drop any instances that have missing values\n df = df.dropna()\n\n # define features\n features = df[['pitch_type', 'release_speed', 'release_spin_rate',\n 'if_fielding_alignment', 'launch_angle', 'launch_speed',\n 'hc_x', 'hc_y', 'stand', 'type', 'RH']]\n\n # make dummies for categorical features\n features = pd.get_dummies(features)\n\n # define label\n label = df['hit']\n\n # split data into test and training\n features_train, features_test, label_train, label_test = \\\n train_test_split(features, label, test_size=0.3)\n\n standard = StandardScaler()\n\n features_train = standard.fit_transform(features_train)\n features_test = standard.transform(features_test)\n\n return features_train, features_test, label_train, label_test", "def build_splits(dataset, train_size, valid_size, by=['context_id'], seed=17):\n if isinstance(seed, RandomState):\n rng = seed\n else:\n rng = RandomState(seed)\n\n groups = dataset.groupby(by).groups\n context_ids = groups.keys()\n\n train_ids, other_ids = sklearn.cross_validation.train_test_split(\n context_ids, train_size=train_size, random_state=rng)\n valid_ids, test_ids = sklearn.cross_validation.train_test_split(\n other_ids, train_size=valid_size, random_state=rng)\n\n train_idx = context_id_to_idx(train_ids, groups)\n valid_idx = context_id_to_idx(valid_ids, groups)\n test_idx = context_id_to_idx(test_ids, groups)\n\n return dataset.ix[train_idx, :], dataset.ix[valid_idx, :], dataset.ix[test_idx, :]", "def splits(self) -> List[int]:\n if self._splits is None:\n self.RefreshStats()\n return self._splits", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n 
assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def split_by_attribute(dbsession, group, attr):\n values = []\n for item in group.items:\n if attr in item.attributes and item.attributes[attr]:\n values.extend(item.attributes[attr])\n categories = [\n (v, c) for v, c in Counter(values).most_common() if c < len(group.items) * 0.6666 and c >= 15 # noqa: PLR2004\n ]\n if categories:\n category_values = [v for v, _ in categories]\n has_values = 0\n for item in group.items:\n found = False\n for value in item.attributes[attr]:\n if value in category_values:\n found = True\n break\n if found:\n has_values = has_values + 1\n if has_values / len(group.items) > 0.9: # noqa: PLR2004\n categories.reverse()\n for category in categories:\n new_group = Group(\n value=category[0], label=f\"{group.label} - {category[0]}\", parent=group, split=\"attribute\"\n )\n dbsession.add(new_group)\n for item in list(group.items):\n if category[0] in item.attributes[attr]:\n item.group = new_group\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"attribute\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False", "def possible_splits(self,feature,y):\n\n yi = y[:-1]\n yi1= y[1:]\n idx=np.argwhere((yi1-yi)!=0)\n return idx.flatten()", "def splits_concept_levels(data: pd.DataFrame, type_col: Optional[str], concept_strings: List) -> List:\n\n con_string, anc_string = concept_strings\n data = data.copy().replace(r'^\\s*$', np.nan, regex=True)\n\n # extract relevant columns\n if type_col is not None:\n all_cols = [x for x in data.columns if type_col not in x]\n conc_type = [x for x in data.columns if con_string.upper() in x.upper() and type_col.upper() in x.upper()]\n conc_type_uri = [x for x in conc_type if x.upper().endswith('URI')][0]\n anc_type = [x for x in data.columns if anc_string.upper() in x.upper() and type_col.upper() in x.upper()]\n anc_type_uri = [x for x in anc_type if x.upper().endswith('URI')][0]\n # extract concept codes from ancestor codes\n concept = data[all_cols + conc_type].dropna(subset=conc_type, how='all').drop_duplicates()\n ancestor = data[all_cols + anc_type].dropna(subset=anc_type, how='all').drop_duplicates()\n # get counts of ontology concepts at each concept level\n concept_ont_codes = [i for j in [x.split(' | ') for x in list(concept[conc_type_uri])] for i in j]\n ancestor_ont_codes = [i for j in [x.split(' | ') for x in list(ancestor[anc_type_uri])] for i in j]\n else:\n concept = data[[x for x in data.columns if 
x.startswith(con_string)]].dropna(how='all').drop_duplicates()\n ancestor = data[[x for x in data.columns if x.startswith(anc_string)]].dropna(how='all').drop_duplicates()\n concept_ont_codes, ancestor_ont_codes = [], []\n\n return [(concept, concept_ont_codes), (ancestor, ancestor_ont_codes)]", "def _partitionize(df, settings, grids, frag):\n column = settings['feature']\n if len(df) > 0:\n init, end, end2 = grids\n tmp = df.apply(lambda row: _inblock(row, column, init, end), axis=1)\n tmp = df.loc[tmp]\n\n if len(frag) > 0:\n frag = pd.concat([frag, tmp])\n else:\n frag = tmp\n return frag", "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def single_cv(df, n_splits=3, val_size=2, seed=0xBadCafe):\n folds = sorted(df.fold.unique())\n split = []\n for f in folds:\n idx_b = df[df.fold == f].index.tolist()\n cv_b = train_val_test_split(idx_b, val_size=val_size, n_splits=n_splits, random_state=seed)\n for cv in cv_b:\n split.append(cv)\n return split", "def get_split_conds():\n conds = []\n for splitter in split_config_registry.data:\n splitter_cls = split_config_registry.data[splitter]\n other_props = schema_utils.unload_jsonschema_from_marshmallow_class(splitter_cls)['properties']\n schema_utils.remove_duplicate_fields(other_props, TYPE)\n splitter_cond = schema_utils.create_cond({'type': splitter}, other_props)\n conds.append(splitter_cond)\n return conds", "def split_dataframe(df, n_split, axis=0):\n\n # TODO: implement axis logic\n\n if df.shape[0] < n_split:\n raise ValueError(\n 'n_split ({}) can\\'t be greater than the number of rows ({}).'.\n format(n_split, df.shape[0]))\n elif n_split <= 0:\n raise ValueError('n_split ({}) can\\'t be less than 0.'.format(n_split))\n\n n = df.shape[0] // n_split\n\n splits = []\n\n for i in range(n_split):\n start_i = i * n\n end_i = (i + 1) * n\n splits.append(df.iloc[start_i:end_i, :])\n\n i = n * n_split\n if i < df.shape[0]:\n splits.append(df.ix[i:])\n\n return splits", "def isplit(iterable, splitters):\n return [list(g) for k,g in itertools.groupby(iterable,lambda x:x in splitters) if not k]", "def autosplit(self):\n result = RowSet()\n for row in self:\n result.append(row.autosplit())\n return result", "def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split(self, amount):\n 
split_objs = list(self.all())\n if not split_objs:\n raise NoSplitsFoundForRecurringCost()\n\n portions = [split_obj.portion for split_obj in split_objs]\n\n split_amounts = ratio_split(amount, portions)\n return [\n (split_objs[i], split_amount)\n for i, split_amount\n in enumerate(split_amounts)\n ]", "def splitXY(self, df = None):\n df = df if isinstance(df, pd.DataFrame) else self.df # <-- default\n return tuple(df.filter(regex = self.regex[k])\n for k in ('features', 'targets'))", "def get_subset_split(annotation_data):\r\n\r\n # Get the audio filenames and the splits without duplicates\r\n data = annotation_data[['split', 'audio_filename', 'annotator_id']]\\\r\n .groupby(by=['split', 'audio_filename'], as_index=False)\\\r\n .min()\\\r\n .sort_values('audio_filename')\r\n\r\n train_idxs = []\r\n valid_idxs = []\r\n\r\n for idx, (_, row) in enumerate(data.iterrows()):\r\n if row['split'] == 'train':\r\n train_idxs.append(idx)\r\n elif row['split'] == 'validate' :\r\n # For validation examples, only use verified annotations\r\n valid_idxs.append(idx)\r\n\r\n return np.array(train_idxs), np.array(valid_idxs)", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def split(self,X,y=None):\n all_idx = pd.Series(np.arange(X.shape[0])) \n mbrg = int(X.shape[0]*self.embargo_pct)\n test_starts=[(i[0],i[-1]+1) for i in np.array_split(all_idx.values,self.n_splits)]\n for i, j in test_starts:\n t0 = all_idx.index[i] # start of test set\n test_indices = all_idx.values[i:j]\n maxT1Idx = all_idx.index.searchsorted(all_idx[test_indices].max())\n train_indices = all_idx.index.searchsorted(all_idx[all_idx<=t0].index)\n if maxT1Idx < X.shape[0]: \n train_indices=np.concatenate((train_indices,all_idx[maxT1Idx+mbrg:]))\n yield train_indices,test_indices", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins 
set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def split_by_class(position_col):\n name = position_col.name\n positions = position_col.values\n pos_by_class = {\"A\"+name :[], \"B\"+name :[], \"C\"+name :[], \"F\"+name :[]}\n number_letter = [\"0\",'A','B', 'C', 'F']\n for pos in positions:\n splited_pos = pos.split(\"x\")\n for i in [1,2,3,4]:\n if pos == \"Ligand\":\n pos_by_class[number_letter[i]+name].append(\"Ligand\")\n else:\n pos_by_class[number_letter[i]+name].append(splited_pos[0]+\"x\"+splited_pos[i])\n\n return(pd.DataFrame.from_dict(pos_by_class))", "def split_db_original(x, components):\n cm = components[1]\n ap = []\n for itera in cm:\n ap.append(x[:, itera].tolist())\n ap_np = np.transpose(np.array(ap))\n\n return ap_np", "def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df", "def split_by_standard(df, compl_data):\n \n #Find non-standard simulations\n standard_simulations = { dyn for dyn in compl_data if is_gpcrmd_community} \n df_standard = df[df.columns[df.columns.isin(standard_simulations)]]\n \n return(df_standard)", "def split(self) -> GQASplit:\n return self._split", "def get_split_conds():\n conds = []\n for splitter in split_config_registry.data:\n splitter_cls = split_config_registry.data[splitter]\n other_props = schema_utils.unload_jsonschema_from_marshmallow_class(splitter_cls)[\"properties\"]\n other_props.pop(\"type\")\n splitter_cond = schema_utils.create_cond(\n {\"type\": splitter},\n other_props,\n )\n conds.append(splitter_cond)\n return conds", "def _get_splits(self, n_examples, seed):\n\n if seed is not None:\n rng = default_rng(seed)\n else:\n rng = default_rng()\n\n data_rows = list(range(n_examples))\n rng.shuffle(data_rows)\n\n split_rows = [data_rows[pair[0] : pair[1]]\n for pair in self.split_indices]\n\n return split_rows", "def split_data(data,split_column,split_value):\n split_column_value = data[:,split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n\n if type_of_feature == 'continuous':\n data_below = data[split_column_value <= split_value]\n data_above = data[split_column_value > split_value]\n else:\n data_below = data[split_column_value == split_value]\n data_above = data[split_column_value != split_value]\n return data_below,data_above", "def split_features(data):\n X = data.copy()\n #split nucleotid string (len=60) into a list of independent characters (DNA nucleotids)\n X['dna'] = X['dna'].map(lambda x : list(str(x).strip()))\n #create 60 new attributes (columns) for each DNA nucleotide index\n #each attribute has name dna_idx where idx is index (1-based) in the list above\n for idx in range(60):\n X['dna_%d' % (idx+1)] = X['dna'].map(lambda x : x[idx])\n #remove the old dna column (redundant information)\n del X['dna']\n #remove descriptor\n del X['id']\n \n return X", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def group(df, dvmin, dvmax, step):\n\tr = 
step/2\n\tres = []\n\n\tfor ticker in range(dvmin, dvmax, step):\n\t\t#select values by left-right difference in sum in range (x-r, x+r). x is the middle value of a bucket. \n\t\tsubgroup = df.loc[(df['diff']>ticker-r) & (df['diff']<ticker+r)\n\t\t\t& (df['choice'] != 0.5)]\n\t\t#count frequency of choosing left\n\t\tnum = subgroup['choice'].sum()\n\t\t#total number of datapoints in the bucket\n\t\tdenom = subgroup.shape[0]\n\t\t#calculate and append the prob. append 0 if empty bucket\n\t\tres.append(num/denom) if denom else res.append(0)\n\treturn res", "def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def title_based_split(df, split_val):\n # retrieve split title and get the minimum id with that title\n split_title = df['title'].iloc[int(split_val * len(df)) - 1]\n split_index = df[df['title'] == split_title].index.min()\n return df.iloc[:split_index], df.iloc[split_index:]", "def getObjectComponents(df):\n return df.loc[getObjectComponentIndexes(df)]", "def split ( self, feature_matrix: np.ndarray, target_array: np.ndarray = None ):\n # Split the indices into `number_of_folds` subarray\n indices = self.get_indices ( feature_matrix )\n split_indices = KFoldCV._get_indices_split ( indices = indices, number_of_folds = self.number_of_folds )\n for number_of_split in range ( self.number_of_folds ):\n # Return all but one split as train, and one split as test\n yield KFoldCV._get_one_split ( split_indices, number_of_split = number_of_split )\n # End split()", "def train_test_split(df, random_state=42):\n if len(df) < 3:\n print('no bueno')\n train, test = train_test_split(df, test_size=.2, random_state=random_state)\n train, val = train_test_split(train, test_size=.2, random_state=random_state)\n return train, test, val", "def all2one(df, val_size=2, seed=0xBadCafe):\n folds = sorted(df.fold.unique())\n split = []\n for f in folds:\n idx_b = df[df.fold != f].index.tolist()\n test_ids = df[df.fold == f].index.tolist()\n train_ids, val_ids = train_test_split(idx_b, test_size=val_size, random_state=seed)\n split.append([train_ids, val_ids, test_ids])\n return split" ]
[ "0.6871864", "0.6392091", "0.62676346", "0.60429597", "0.60380805", "0.603773", "0.5964825", "0.5956346", "0.5944079", "0.5930761", "0.58742964", "0.58675", "0.5851564", "0.5841597", "0.5820882", "0.5810458", "0.5741932", "0.573894", "0.57379395", "0.5715522", "0.57015127", "0.5685041", "0.5684652", "0.5671033", "0.56452507", "0.56162703", "0.56144196", "0.55983806", "0.5597305", "0.55369955", "0.551719", "0.54964256", "0.54933566", "0.54856944", "0.5470619", "0.54662687", "0.54574823", "0.5443917", "0.5437532", "0.5428439", "0.5402875", "0.53768855", "0.5374718", "0.5368073", "0.53622603", "0.5355105", "0.5340921", "0.53233397", "0.5313849", "0.5296965", "0.52946323", "0.52833426", "0.5278117", "0.52759206", "0.5265497", "0.52515095", "0.5244997", "0.52191514", "0.5218711", "0.521602", "0.52151966", "0.52127147", "0.5210636", "0.5164858", "0.51598656", "0.51593757", "0.5151123", "0.51450825", "0.5144484", "0.5133282", "0.5129084", "0.51283205", "0.5124902", "0.5123616", "0.5123616", "0.51199883", "0.51138103", "0.51092416", "0.5108941", "0.5100919", "0.50989944", "0.509774", "0.5088728", "0.5085987", "0.50758016", "0.50715697", "0.5070669", "0.5057355", "0.50545347", "0.50368553", "0.503377", "0.50304323", "0.50250024", "0.5013303", "0.50126106", "0.501215", "0.50111353", "0.5010552", "0.5006831", "0.5006745" ]
0.81955785
0
Helper function to evaluate the Gini value for a split. This function gives the weighted Gini gain for the split generated by 'split' on 'attribute' in the given dataframe.
def evaluate_split(df, attribute, split):
    mask = df[attribute] <= split

    # split the dataset on the split attribute
    dfl = df[mask]
    dfr = df[~mask]

    # calculate weighting factors for each child
    weighting_factor_left = float(dfl.shape[0]) / df.shape[0]
    weighting_factor_right = float(dfr.shape[0]) / df.shape[0]

    # calculate gini for parent, left and right
    gini_parent = gini_impurity(df)
    gini_left = gini_impurity(dfl)
    gini_right = gini_impurity(dfr)

    # calculate weighted gini gain for this split
    weighted_gini = gini_parent - (weighting_factor_left * gini_left + weighting_factor_right * gini_right)
    return weighted_gini
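Note that evaluate_split relies on a gini_impurity helper that does not appear on this page. Below is a hedged sketch of what such a helper could look like, plus how the two functions above might be combined to pick a split; the label column name ("label"), the attribute name ("x"), and the toy DataFrame are assumptions for illustration, and the sketch presumes get_possible_splits and evaluate_split from this page are in scope.

import numpy as np
import pandas as pd

def gini_impurity(df, label_col="label"):
    # Gini impurity of a partition: 1 - sum of squared class proportions.
    # Assumes class labels live in `label_col`; the original helper is not shown here.
    if df.shape[0] == 0:
        return 0.0
    p = df[label_col].value_counts(normalize=True).to_numpy()
    return 1.0 - float(np.sum(p ** 2))

# Illustrative search: score every candidate split on attribute "x" and keep
# the one with the largest weighted Gini gain returned by evaluate_split.
df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "label": [0, 0, 1, 1]})
best = max(get_possible_splits(df, "x"), key=lambda s: evaluate_split(df, "x", s))
print(best)  # -> 2.5 for this toy data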
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calcGiniSplitBySplitValue(self, data, structure, colIndex, splitValue):\n dataBellow = list(filter(lambda x: float(x[colIndex]) <= splitValue, data))\n dataAbove = list(filter(lambda x: float(x[colIndex]) > splitValue, data))\n giniSplit = (len(dataBellow) / len(data)) * self.calcDataGini(dataBellow, structure) +\\\n (len(dataAbove) / len(data)) * self.calcDataGini(dataAbove, structure)\n return round(giniSplit, 3)", "def __gini(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n gini_value = 1.0\n\n # Calculate the gini of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n gini_value -= math.pow(probability, 2)\n\n return gini_value", "def calcGiniSplitByColumn(self, data, structure, colIName):\n colIndex, giniSplit = structure[colIName]['index'], 0\n for value in structure[colIName][\"values\"]:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data)\n giniSplit += self.calcDataGini(newData, structure) * p\n return round(giniSplit, 3)", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. 
Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def getSplit(self):\n b_index, b_value, b_score, b_groups = 999, 999, 999, None\n for j in range(len(self[0]) - 1):\n for i in range(len(self)):\n groups = self.splitAttribute(j, self[i][j]) # lit, big\n gini = self.giniIndex(groups)\n if gini < b_score and (j, \"%.1f\" % self[i][j]) not in self.atr:\n b_index, b_value, b_score, b_groups = j, self[i][\n j], gini, groups\n return b_index, b_value, b_groups, b_score", "def findBestValueSplitByGini(self, data, structure, colIndex):\n minGini, bestSplit = 1, []\n for i in range(0, len(data)-1):\n split = (float(data[i][colIndex]) + float(data[i+1][colIndex])) / 2\n giniSplit = self.calcGiniSplitBySplitValue(data, structure, colIndex, split)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = [split, giniSplit]\n return bestSplit", "def get_possible_splits( df , attribute ):\n ds = df.loc[:,attribute]\n \n # First sort the values \n ds = ds.sort_values().drop_duplicates()\n \n # Compute averages of consecutive values \n ds = ds.rolling(2).sum().divide(2)\n splits = ds[1:].tolist()\n \n # return the possible splits \n return splits", "def best_split1(self,X,attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = information_gain1(valc,X[attribute],X[\"Output\"],self.type)\n if (cur_if>global_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val\n else:\n global_if = float('inf') # the lowest value of varience seen so far\n attr , val = None, None\n for attribute in attributes[::-1]:\n attr_val = pd.Series(X[attribute].unique()).sort_values(ignore_index=True)\n last_val = attr_val[0]\n for i in range(1,attr_val.size):\n cur_val = attr_val[i]\n valc = round((last_val+cur_val)/2,4)\n last_val = cur_val\n cur_if = gini_gain1(X[\"Output\"],X[attribute], valc)\n if (global_if>cur_if):\n global_if,attr,val = cur_if,attribute,valc\n return attr,val", "def splitmetric(self, dataset, attr, target_attr):\n freq = {}\n splitinfo = 0.0\n \n #Call information gain\n gain = ID3.splitmetric(self, dataset, attr, target_attr);\n samplenumbers = len(dataset)\n # Calculate the frequency of each of the values in the split attribute\n for record in dataset:\n if (record[attr] in freq):\n freq[record[attr]] += 1.0\n else:\n freq[record[attr]] = 1.0\n \n #Calculate split info, entropy of splitter\n for val in list(freq.values()):\n splitinfo += (- val / samplenumbers) * math.log(val / samplenumbers, 2)\n \n #Split info equals 0 when there only one class in data set\n if splitinfo == 0:\n splitinfo = 0.00000001\n \n return gain / splitinfo", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information 
gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def test_split(self,X,y,splits):\n n_data = len(y) #Number of data points\n splits=(X[splits]+X[splits+1])/2\n\n idx_greater = (X>splits[:,None]) #index for greater split\n idx_lower = (X<splits[:,None]) #index for lower split\n\n imp_greater =[self.impurity(y[idx]) for idx in idx_greater] #impurity for greater\n imp_lower = [self.impurity(y[idx]) for idx in idx_lower] #impurity lower\n\n impur = [sum(idx_great)/n_data*imp_great+sum(idx_low)/n_data*imp_low for idx_great,imp_great,idx_low,imp_low in zip(idx_greater,imp_greater,idx_lower,imp_lower)] #Weighted impurity\n return (impur,splits)", "def split_data(data,split_column,split_value):\n split_column_value = data[:,split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n\n if type_of_feature == 'continuous':\n data_below = data[split_column_value <= split_value]\n data_above = data[split_column_value > split_value]\n else:\n data_below = data[split_column_value == split_value]\n data_above = data[split_column_value != split_value]\n return data_below,data_above", "def __dividePandas(df, column, value):\n if isinstance(value, int) or isinstance(value, float): #Check if value is a #\n #Divide the rows into two sets and return them\n set1 = df[df[column] >= value] #Observations greater than or equal to value\n set2 = df[df[column] < value] #Observations less than value are in set2\n else:\n set1 = df[df[column] == value] #Observations equal to value are in set 1\n set2 = df[df[column] != value] #Observations not equal to value are in set2 \n return (set1, set2)", "def getListWithBestValueSplitsOfDataByGini(self, data, structure, colIndex, numOfSplits):\n splitsList, newSplitsList = {}, []\n self.fillDictWithBestValueSplitsOfDataByGini(data, structure, colIndex, numOfSplits, splitsList, 0)\n for lists in list(splitsList.values())[1:]:\n while len(lists) > 0:\n splitOne, splitTwo = [], []\n if len(lists) > 0:\n splitOne = lists.pop()\n if len(lists) > 0:\n splitTwo = lists.pop()\n if splitOne and splitTwo:\n if splitOne[1] <= splitTwo[1]:\n newSplitsList.append(splitOne[0])\n newSplitsList.append(splitTwo[0])\n else:\n newSplitsList.append(splitTwo[0])\n newSplitsList.append(splitOne[0])\n elif splitOne:\n newSplitsList.append(splitOne[0])\n elif splitTwo:\n newSplitsList.append(splitTwo[0])\n\n newSplitsList.insert(0, splitsList['0'][0][0])\n self.removeDuplicatesInList(newSplitsList)\n return newSplitsList[0:numOfSplits]", "def split(x):\r\n with open(\"iris.csv\") as f:#Open the csv file which will now be called \"f\"\r\n attribute=[] # list with name \"attribute\"\r\n feature=[] # list with name feature\r\n for line in f: # for loop - iterates through all the values in f\r\n jay = line.split(\",\") #for each line in the dataset split the lines into new individual lists e.g. 
[3.5,1.4,0.2,Iris-setosa][4.9,3.0,1.4,0.2,Iris-setosa]\r\n feature.append(jay[x]) #Append the list \"s_l\" with the 1st value in each of the newly created lists\r\n for i in feature: # for loop - iterates through all the values in the list \"feature\"\r\n try: # try will try the following condition for all the values of i in feature\r\n attribute.append(float(i)) # Convert all the values in \"s_l\" into float and append them to a new list called \"sepal_length\"\r\n except: # if try does not work the loop will move to \"except\" and will attempt the contained code\r\n attribute.append(i.rstrip()) # strips the \"\\n\" from the string values for iris class in column 5\r\n if x == 0: # if statement - condition of x equal to 0 has to be met for the contained code to be executed\r\n sepal_length = [] # list for sepal length created\r\n sepal_length = attribute # makes the list \"sepal_length\" have all the same values as \"attribute\"\r\n return(sepal_length) # return the list \"sepal_length\"\r\n elif x == 1: # elif statement which does the same as the above if statement but only if the condition x equal to 1 is met. All other elif statements below are similar.\r\n sepal_width = []\r\n sepal_width = attribute\r\n return(sepal_width)\r\n elif x == 2:\r\n petal_length = []\r\n petal_length = attribute\r\n return(petal_length)\r\n elif x == 3:\r\n petal_width = []\r\n petal_width = attribute\r\n return(petal_width)\r\n elif x == 4:\r\n iris_class = []\r\n iris_class = attribute\r\n return(iris_class)\r\n else: # if none of the conditions above are met the else condition is met and will execute the below code\r\n return(\"No dice! This data set contains coulmn 0 to 4, try a number between 0 and 4\") # Returns string telling user to try again\r", "def calcDataGini(self, data, structure):\n classIndex, result, lenData = structure['class']['index'], 1, len(data)\n for value in structure['class']['values']:\n newData = list(filter(lambda x: x[classIndex] == value, data))\n p = len(newData) / lenData if lenData > 0 else 1\n result -= (p*p)\n return round(result, 3)", "def gini_index(Y):\n\n if (isinstance(Y, list) == False):\n temp_Y = Y.tolist()\n else:\n temp_Y = Y\n total_samples = len(temp_Y)\n\n temp = np.unique(Y, return_counts=True)\n\n Y_count = list(temp[1])\n Y_unique = list(temp[0])\n\n ans = 1\n\n for attr in Y_unique:\n g = Y_count[Y_unique.index(attr)] / total_samples\n ans -= (g**2)\n\n return ans", "def entropyWithBestSplit(attr, X, y):\n df = X[attr]\n maxVal = df.max()\n minVal = df.min()\n\n # Square root decomposition\n r = math.ceil(math.sqrt(maxVal - minVal))\n entropyVal = float('inf')\n splitAt = -1\n\n iterNum = 0\n while r > MIN_BIN_SIZE and iterNum < MAX_ITER:\n iterNum += 1\n i = minVal\n while i < maxVal:\n leftSplit = df[df < i].index.tolist()\n rightSplit = df[df >= i].index.tolist()\n buff_entropy = entropy(df, leftSplit) *\\\n (len(leftSplit) / (len(leftSplit) + len(rightSplit)))\n buff_entropy += entropy(df, rightSplit) *\\\n (len(rightSplit) / (len(leftSplit) + len(rightSplit)))\n if entropyVal >= buff_entropy:\n entropyVal = buff_entropy\n splitAt = i\n i += r\n minVal = min([splitAt - r, minVal])\n maxVal = max([splitAt + r, maxVal])\n r_buff = math.ceil(math.sqrt(maxVal - minVal))\n if r_buff >= r:\n break\n else:\n r = r_buff\n finalIdxLists = [\n df[df < splitAt].index.tolist(),\n df[df >= splitAt].index.tolist()\n ]\n return [splitAt], finalIdxLists, entropyVal", "def gini(array): # https://github.com/oliviaguest/gini/blob/master/gini.py, accessed 02/7/2019\n # 
based on bottom eq:\n # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg\n # from:\n # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n # All values are treated equally, arrays must be 1d:\n array = array.flatten()\n if np.amin(array) < 0:\n # Values cannot be negative:\n array -= np.amin(array)\n # Values cannot be 0:\n array += 0.0000001\n # Values must be sorted:\n array = np.sort(array)\n # Index per array element:\n index = np.arange(1, array.shape[0] + 1)\n # Number of array elements:\n n = array.shape[0]\n # Gini coefficient:\n return (np.sum((2 * index - n - 1) * array)) / (n * np.sum(array))", "def gini_gain(Y, attr):\n Y = Y.tolist()\n attr = attr.tolist()\n attr_set = set(attr)\n attr_set = list(attr_set)\n initial_gain=0\n for i in attr_set:\n l = []\n for j in range (len(attr)):\n if attr[j] == i:\n l.append(Y[j])\n initial_gain = initial_gain+(len(l)/len(Y))*gini_index(l)\n return initial_gain", "def convert_attribute(df):\n\n df['Popularity'] = ['Oversold' if x == '(1.0, 2.0]' else 'Unpopular' if x == '(-1.0, 0.0]' or x == '(0.0, 0.5]' else 'Popular' for x in df['percent_of_cap_binRange1']]\n\n return df", "def get_split_goodness_fit_continuous(\n arr: np.ndarray, y: np.ndarray, split: float, eval_func: Callable\n ):\n # Get above and below the split value\n above = arr >= split\n below = arr < split\n\n # get weighted average eval_func on the splits\n n_above = np.sum(above)\n above_eval = (\n eval_func(y[above]) * n_above / len(y)\n ) # weight = frac points in above\n below_eval = (\n eval_func(y[below]) * (len(y) - n_above) / len(y)\n ) # weight = frac points not in above\n\n # returns weighted sum of eval_func across splits, and the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum(\n map(\n lambda x: x * np.log(x),\n [n_above / len(y), (len(y) - n_above) / len(y)],\n )\n ),\n )", "def calcGainRatioSplitByColumn(self, data, structure, colIName):\n splitInfo, colIndex = 0, structure[colIName]['index']\n for value in structure[colIName]['values']:\n newData = list(filter(lambda x: x[colIndex] == value, data))\n p = len(newData) / len(data) if len(newData) != 0 else 1\n splitInfo += (-1) * p * log2(p)\n splitInfo = 1 if splitInfo == 0 else splitInfo\n return round(self.calcInfoGainByColumnSplit(data, structure, colIName) / splitInfo, 3)", "def get_discrete_split_value(arr: np.ndarray, y: np.ndarray, eval_func: Callable):\n\n # First element is the weighted average eval_func of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum(\n [\n eval_func(y[arr == value]) * np.sum(arr == value) / len(y)\n for value in set(arr)\n ]\n ),\n -1\n * sum(\n [\n pipe(\n np.sum(arr == value) / len(y),\n lambda ratio: ratio * np.log(ratio),\n )\n for value in set(arr)\n ]\n ),\n )", "def gini(array):\n # based on bottom eq:\n # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg\n # from:\n # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n # All values are treated equally, arrays must be 1d:\n array = np.array(array, dtype=np.float64)\n array = np.abs(array.flatten())\n if np.amin(array) < 0:\n # Values cannot be negative:\n array -= np.amin(array)\n # Values cannot be 0:\n array += 0.0000001\n # Values must be sorted:\n array = np.sort(array)\n # Index per array element:\n index = np.arange(1, array.shape[0] + 1)\n # Number of array elements:\n n = array.shape[0]\n # Gini coefficient:\n return ((np.sum((2 * index - n - 1) * 
array)) / (n * np.sum(array)))", "def _information_gain(self, y, X_column, split_thersh):\n # parent E\n parent_entropy = entropy(y)\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thersh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n # weighted avg child E\n n = len(y)\n n_left_samples, n_right_samples = len(left_idxs), len(right_idxs)\n entropy_left, entropy_right = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_left_samples/n) * entropy_left + (n_right_samples/n) * entropy_right\n\n # return IG\n ig = parent_entropy - child_entropy\n return ig", "def findBestColumnSplitByGini(self, data, structure):\n minGini, bestSplit = 1, None\n for colName in list(structure.keys())[:-1]:\n giniSplit = self.calcGiniSplitByColumn(data, structure, colName)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = colName\n return bestSplit", "def get_aggr(self, attribute, aggregator=None, smooth=0., **kwargs):\n if aggregator is None:\n aggregator = np.mean\n data = self.get_all()\n itrs = {row['Iteration'] for row in data}\n itrs = sorted(list(itrs))\n vals = []\n running_avg = 0\n for itr in itrs:\n itr_data = DatasetBuilder(data).filter_itr(itr).get_all()\n val = aggregator([row[attribute] for row in itr_data], **kwargs)\n if len(vals) == 0:\n running_avg = val\n else:\n running_avg = smooth * running_avg + (1 - smooth) * val\n vals.append(running_avg)\n return np.array(itrs), np.array(vals)", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def split_by_attribute(dbsession, group, attr):\n values = []\n for item in group.items:\n if attr in item.attributes and item.attributes[attr]:\n values.extend(item.attributes[attr])\n categories = [\n (v, c) for v, c in Counter(values).most_common() if c < len(group.items) * 0.6666 and c >= 15 # noqa: PLR2004\n ]\n if categories:\n category_values = [v for v, _ in categories]\n has_values = 0\n for item in group.items:\n found = False\n for value in item.attributes[attr]:\n if value in category_values:\n found = True\n break\n if found:\n has_values = has_values + 1\n if has_values / len(group.items) > 0.9: # noqa: PLR2004\n categories.reverse()\n for category in categories:\n new_group = Group(\n value=category[0], label=f\"{group.label} - {category[0]}\", parent=group, split=\"attribute\"\n )\n dbsession.add(new_group)\n for item in list(group.items):\n if category[0] in item.attributes[attr]:\n item.group = new_group\n new_group = Group(value=group.label, label=group.label, parent=group, split=\"attribute\")\n dbsession.add(new_group)\n for item in list(group.items):\n item.group = new_group\n return True\n return False", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n 
return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def compute_average_value(self, set_label, feature, group_by_features, low_dt=None, high_dt=None):\n assert ((low_dt is None) and (high_dt is None)) or ((low_dt is not None) and (high_dt is not None))\n tt = set_label + '_transaction'\n it = set_label + '_identity'\n feature = \"foo.\" + feature\n group_by_features = [\"foo.\" + item for item in group_by_features]\n group_by_features_str = \", \".join(group_by_features)\n view_table_sub = \"(SELECT * FROM {0} JOIN {1} USING (transactionid))\".format(tt, it)\n sql = \"SELECT \" + group_by_features_str + \", AVG(\"+ feature + \") FROM \"\n sql += view_table_sub + \" AS foo\"\n if low_dt is not None:\n assert low_dt <= high_dt\n sql += \" WHERE foo.transactiondt>={0} AND foo.transactiondt<{1}\".format(low_dt, high_dt)\n sql += \" GROUP BY \" + group_by_features_str\n sql +=\";\"\n cur = self.dbinstance.execute_sql(sql)\n return cur", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def createBinsByGiniIndex(self, data, structure, colIndex, numOfBins):\n splits = self.miningCalculator.getListWithBestValueSplitsOfDataByGini(data, structure, colIndex, numOfBins - 1)\n splits.sort()\n bins = {\"value<=\" + str(splits[0]): lambda x: x <= splits[0]}\n if len(splits) > 1:\n for i in range(1, numOfBins - 1):\n bins[str(splits[i - 1]) + '<value<=' + str(splits[i])] = (lambda x: splits[i - 1] < x <= splits[i])\n bins[\"value>\" + str(splits[len(splits) - 1])] = (lambda x: x > splits[len(splits) - 1])\n return bins", "def calc_gini_impurity(self, data):\n impurity = 1\n label_counts = get_value_counts(data)\n for label in label_counts.keys():\n impurity -= (label_counts[label]/len(data))**2 # nrows\n return impurity", "def calc_stat_values(self):", "def __divideset(rows, column, value):\n split_function = None #Initialize a variable split function.\n if isinstance(value, int) or isinstance(value, float): #Check if value is a number\n #True = the observation's value >= to the splitting criteria. 
False otherwise\n split_function = lambda row: row[column] >= value\n else:\n #If value is a string, True is where the observation's value == the criteria\n split_function = lambda row:row[column] == value\n \n #Divide the rows into two sets and return them\n set1 = [row for row in rows if split_function(row)]\n set2 = [row for row in rows if not split_function(row)]\n return (set1, set2)", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def gini_index(self, y_true=None, y_pred=None, decimal=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal)\n # Calculate class probabilities\n total_samples = len(y_true)\n y_prob = np.zeros(total_samples)\n for idx in range(0, total_samples):\n if y_true[idx] == y_pred[idx]:\n y_prob[idx] = 1\n else:\n y_prob[idx] = 0\n positive_samples = np.sum(y_prob)\n negative_samples = total_samples - positive_samples\n p_positive = positive_samples / total_samples\n p_negative = negative_samples / total_samples\n # Calculate Gini index\n result = 1 - (p_positive ** 2 + p_negative ** 2)\n return np.round(result, decimal)", "def calcInfoGainBySplitValue(self, data, structure, colName, splitVal):\n result = self.calcDataEntropy(data, structure) - self.calcEntropyBySplitValue(data, structure, colName, splitVal)\n result = 0 if result < 0 else result\n return round(result, 3)", "def getValue(splits, featureName):\n for split in splits:\n if split.startswith(featureName):\n return split[split.find(\"=\")+1:]\n \n return None", "def gini_impurity(data):\n\n prob = []\n n = len(data.index) # total data points\n classes = sorted(data.iloc[:,-1].unique()) # get classes (avoids div by 0 problem later on)\n\n # iterate over classes and find probabilities for each class\n for c in classes:\n prob.append(len(data[data.iloc[:,-1] == c]) / n)\n \n gini = 1 - (sum([p**2 for p in prob]))\n \n return gini", "def get_model_values(self, i, df):\n if i == 0:\n df.participation = np.tile(self.participation, (61, 1)).transpose()\n abatement = self.abatement(df.gross_output[i], df.miu[i],\n df.backstop_growth[i],\n df.participation[i])\n damages = self.damages(df.gross_output[i],\n df.temp_atmosphere[i], abatement)\n output = self.output(df.gross_output[i], damages, abatement,\n df.temp_atmosphere[i])\n output_abate = self.output_abate(abatement, df.gross_output[i])\n return [abatement, damages, output, output_abate]", "def gini(rows):\n counts = class_counts(rows)\n print(counts)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def get_metric(self, data_row: pd.Series) -> float:", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n 
# minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def fillDictWithBestValueSplitsOfDataByGini(self, data, structure, colIndex, numOfSplits, splitsList, indexToInsert):\n if len(data) <= 0 or numOfSplits <= 0:\n return []\n split = self.findBestValueSplitByGini(data, structure, colIndex)\n if str(indexToInsert) in splitsList:\n splitsList[str(indexToInsert)] += [split]\n else:\n splitsList[str(indexToInsert)] = [split]\n indexToInsert, numOfSplits = indexToInsert + 1, numOfSplits - 1\n\n if split:\n newDataBellowSplit = list(filter(lambda y: float(y[colIndex]) <= split[0], data))\n newDataAboveSplit = list(filter(lambda y: float(y[colIndex]) > split[0], data))\n self.fillDictWithBestValueSplitsOfDataByGini(newDataBellowSplit, structure, colIndex, numOfSplits, splitsList, indexToInsert)\n self.fillDictWithBestValueSplitsOfDataByGini(newDataAboveSplit, structure, colIndex, numOfSplits, splitsList, indexToInsert)", "def split(self, attribute):\n if attribute not in self.attributes:\n raise KeyError('Attribute not present in node')\n \n self.split_attr = attribute\n \n # list() is used to make a copy of the list instead of pointing to the same list\n child_attributes = list(self.attributes)\n child_attributes.remove(attribute)\n \n child1_ancestors = list(self.ancestors)\n child0_ancestors = list(self.ancestors)\n child1_ancestors.append(attribute_value(attribute, 1))\n child0_ancestors.append(attribute_value(attribute, 0))\n \n self.val1 = Node(child_attributes, child1_ancestors, self.data, self.heuristic)\n self.val0 = Node(child_attributes, child0_ancestors, self.data, self.heuristic)", "def getSplitFunc(self, splitType):\n if splitType.upper() == \"INFO GAIN\":\n return self.findBestColumnSplitByInfoGain\n elif splitType.upper() == \"GAIN RATIO\":\n return self.findBestColumnSplitByGainRatio\n elif splitType.upper() == \"GINI INDEX\":\n return self.findBestColumnSplitByGini\n return None", "def imet(self, i) -> pd.DataFrame:\n if i > self.nmetgauges:\n raise IndexError('Gauge index higher than number of gauges')\n else:\n return self.metgauges[list(self.metgauges.keys())[i]]['Data']", "def get_groupby(line_field):\n if line_field and '[' in line_field and ']' in line_field:\n i = line_field.index('[')\n j = line_field.index(']')\n groupby = literal_eval(line_field[i:j+1])\n return groupby\n return False", "def gini(self, rows):\n counts = self.class_counts(rows)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - 
sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def attribute_value(info_df: DataFrame, attribute: str) -> float | str | int:\n row = info_df[info_df[\"Attribute Name\"] == attribute].values[0]\n value = row[-1]\n value_type = row[-2]\n if value_type == \"int\":\n return int(value)\n if value_type == \"String\":\n return value\n return float(value)", "def usefulquantities(dffin):\n dffin['log_length_box'] = np.log(dffin['length_box_um'])\n dffin['time_min']=dffin['time_sec']/60\n dffin['pred_length_box_um'] = np.exp(dffin['pred_log_length'])\n dffin['unique_id'] = dffin['cell']+dffin['time_sec'].apply(lambda x:str(x))\n dffin['cv_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x:\\\n np.std(x)/np.mean(x))\n dffin['std_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.std(x))\n dffin['mean_gr'] = dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin['mean_len'] = dffin.groupby('cell')['pred_length_box_um'].transform(lambda x: np.mean(x))\n dffin['norm_pred_growth_rate'] = (dffin['pred_growth_rate']-dffin.groupby('cell')['pred_growth_rate'].transform(lambda\\\n x: np.mean(x)))/dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin = rl.genalogy(dffin,'parent_cell') #Create genealogy\n dffin = rl.genalogy(dffin,'g_parent_cell')\n dffin = rl.genalogy(dffin,'g_g_parent_cell')\n dffin = dffin.set_index('unique_id')\n qq= dffin.groupby('cell').apply(lambda x: (x['pred_length_box_um']-x['pred_length_box_um'].iloc[0])/(x['pred_length_box_um'].iloc[-1]-x['pred_length_box_um'].iloc[0])).rename('add_len')\n jj= dffin.groupby('cell').apply(lambda x: (x['time_sec']-x['time_sec'].iloc[0])/(x['time_sec'].iloc[-1]-x['time_sec'].iloc[0])).rename('cell_cycle')\n return pd.concat([dffin, qq.reset_index().set_index('unique_id')['add_len'], jj.reset_index().set_index('unique_id')['cell_cycle']], axis=1, join='inner')", "def splitmetric(self, dataset, attr, target_attr):\n raise NotImplementedError('Subclass should implement this method')", "def get_split_goodness_fit_continuous (\n feature_array: np.ndarray, target_array: np.ndarray, split: float, evaluate_function: Callable\n ):\n # Get above and below the split value\n above = feature_array >= split\n below = feature_array < split\n\n # Get weighted average evaluate_function on the splits\n n_above = np.sum ( above )\n above_eval = (\n evaluate_function ( target_array [ above ] ) * n_above / len ( target_array )\n ) # Weight = frac points in above\n below_eval = (\n evaluate_function ( target_array [ below ] ) * ( len ( target_array ) - n_above ) / len ( target_array )\n ) # Weight = frac points not in above\n\n # returns weighted sum of evaluate_function across splits & the gain ratio denominator\n return (\n above_eval + below_eval,\n -1\n * sum (\n map (\n lambda x: x * np.log ( x ),\n [ n_above / len ( target_array ), ( len ( target_array ) - n_above ) / len ( target_array ) ],\n )\n ),\n ) # End get_split_goodness_fit_continuous", "def splitBy(data, attribute_id):\n \n col = getColumn(data, attribute_id)\n values = set(col)\n split_data = [] \n for i in values:\n subset = [row for row in data if row[attribute_id] == i]\n split_data.append(subset)\n \n return split_data", "def mineral_value(attribute):\n return attribute[1]", "def icdtoelixcomo(df,col_icd):\n elixcomo = loadelixcomo()\n unqcomos = elixcomo['Comorbidity'].unique()\n df['ElixComo']=None\n df['ElixComoScore']=None\n for como in unqcomos:\n icdlist = 
tuple(elixcomo.loc[elixcomo['Comorbidity']==como,'ICD'])\n comoidx = df[col_icd].str.startswith(icdlist,na=False)\n df.loc[comoidx,'ElixComo']=como\n df.loc[comoidx,'ElixComoScore']=elixcomo.loc[elixcomo.Comorbidity==como,'Score'].values[0]\n return df", "def get_discrete_split_value ( feature_array: np.ndarray, target_array: np.ndarray, evaluate_function: Callable ):\n\n # First element is the weighted average evaluate_function of the split\n # Second term is the intrinsic value to penalize many splits.\n return (\n sum (\n [\n evaluate_function ( target_array [ feature_array == value ] ) * np.sum ( feature_array == value ) / len ( target_array )\n for value in set ( feature_array )\n ]\n ),\n -1\n * sum (\n [\n pipe (\n np.sum ( feature_array == value ) / len ( target_array ),\n lambda ratio: ratio * np.log ( ratio ),\n )\n for value in set ( feature_array )\n ]\n ),\n ) # End get_discrete_split_value()", "def giniImpurity(rows, resCol=None):\n if not resCol: #create the dictionary of counts for each class using pure python\n total = len(rows)\n counts = __uniqueCounts(rows)\n else: #Create the dictionary of counts for each class using pandas.\n assert 'index' in dir(rows)\n total = len(rows)\n counts = __uniqueCountsPandas(rows, resCol)\n imp = 1 #Initialize the gini-impurity at 1\n #Implement the formula for calculating gini-impurity\n fracs = [float(x)/total for x in counts.values()]\n for x in fracs:\n imp -= x*x\n return imp", "def find_split(self, X, y):\n choices = y.size\n if choices <= 1:\n return None, None\n\n # find the number of each option in the current node.\n options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]\n\n # find the gini of current node.\n best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)\n best_idx, best_split = None, None\n\n # loop through the features to get splits and options.\n for idx in range(self.num_features):\n splits, options = zip(*sorted(zip(X[:, idx], y)))\n\n num_left = [0] * self.num_outcomes\n num_right = options_parent.copy()\n for i in range(1, choices):\n c = options[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / i) ** 2 for x in range(self.num_outcomes)\n )\n\n gini = (i * gini_left + (choices - i) * gini_right) / choices\n\n if splits[i] == splits[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_split = (splits[i] + splits[i - 1]) / 2\n\n return best_idx, best_split", "def iobs(self, i) -> pd.DataFrame:\n if i > self.nobsgauges:\n raise IndexError('Gauge index higher than number of gauges')\n else:\n return self.obsgauges[list(self.obsgauges.keys())[i]]['Data']", "def calcumul_index(path,x,name_champ_label,indice2,list_drop,pathlist_names_feature):\n sql=sqlite3.connect(path)\n df=pd.read_sql_query(\"SELECT * FROM output\", sql)\n df=df.groupby(\"originfid\").mean()\n if 'band' in df.columns[6] :\n globals()[\"df%s\"%x]=col_sqlite(path,x,list_drop,pathlist_names_feature)\n label = globals()[\"df%s\"%x][name_champ_label]\n globals()[\"%s\"%x]=globals()[\"df%s\"%x].astype(float)\n print(indice2)\n if indice2 not in ['NDVI', 'NDWI','SM','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? 
:\")\n df_b1 = globals()[\"%s\"%x].filter(like=band1_indice)\n df_b2 = globals()[\"%s\"%x].filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = globals()[\"df%s\"%x].filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n df_indice_col=df_indice_col.iloc[:-1]\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_%s\"%indice2]=globals()[\"df_%s\"%indice2].astype(float)\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T \n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n else :\n label = df[name_champ_label]\n print(indice2)\n if indice2 not in ['ndvi', 'ndwi','asc_vv','des_vv','asc_vh','des_vh','asc_userfeature1','des_userfeature1','SM']:\n name_indice=indice2\n band1_indice=input(\"band ? :\")\n band2_indice=input(\"band ? :\")\n df_b1 = df.filter(like=band1_indice)\n df_b2 = df.filter(like=band2_indice)\n df_b1_col = df_b1.rename(columns=lambda x: x[-8:])\n df_b2_col = df_b2.rename(columns=lambda x: x[-8:])\n df_indice = (df_b2_col - df_b1_col)/(df_b2_col + df_b1_col)\n globals()[\"df_%s\"%indice2] = df_indice.cumsum(axis=1)\n \n else:\n df_indice = df.filter(like=indice2)\n df_indice_col = df_indice.rename(columns=lambda x: x[-8:])\n globals()[\"df_%s\"%indice2] = df_indice_col.cumsum(axis=1)\n \n globals()[\"df_%s\"%indice2][name_champ_label]=label\n globals()[\"df_mean_%s\"%indice2]=globals()[\"df_%s\"%indice2].groupby(name_champ_label).mean().T\n globals()[\"df_mean_%s\"%indice2].index=pd.to_datetime(globals()[\"df_mean_%s\"%indice2].index,format=\"%Y%m%d\")\n return globals()[\"df_mean_%s\"%indice2], globals()[\"df_%s\"%indice2]", "def per_gene_coverage(genes,df):\n\n sub_genes =[]\n\n #For every gene in the list, check the average coverage, if less than 100 add it to the final list.\n for gene in genes:\n coverage = average(df[df['GeneSymbol;Accession'] == gene]['percentage30'])\n\n if coverage < 100:\n sub_genes.append([gene.split(';')[0],round(coverage,2)])\n \n return sub_genes", "def get_interest_variable(\n in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor=\"A620\"\n):\n dataset_pproc = in_dataset.loc[\n in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]\n ]\n hrs_str = dataset_pproc[hr_col].to_string()\n dates_str = dataset_pproc[date_col]\n\n dataset_pproc[date_col] = pd.to_datetime(dataset_pproc[date_col])\n dataset_pproc.set_index([date_col, hr_col], inplace=True)\n dataset_pproc.fillna(method=\"ffill\", inplace=True)\n dataset_pproc.interpolate(method=\"linear\", axis=0)\n\n return dataset_pproc", "def getInterval(self) -> float:\n\t\treturn self[self._bcni]", "def gini(y):\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return 1 - np.sum(pk * pk)", "def at_birth(df,variable,npoint):\n return df.groupby('cell')[['{}'.format('{}'.format(variable)),'pred_growth_rate']].apply(lambda x: x.head(npoint).mean()).rename(columns={'pred_length_box_um':'{}_at_birth'.format(variable)})", "def rsi(df, lag):\n\n def avg_gain():\n gains = [\n df[i][\"c\"] - df[i - 1][\"c\"] if df[i][\"c\"] >= df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_gain = 
[sum(gains[:lag]) / float(lag)]\n [avg_gain.append(((avg_gain[-1] * 13) + gain) / 14.0) for gain in gains[lag:]]\n return avg_gain\n\n def avg_loss():\n losses = [\n abs(df[i][\"c\"] - df[i - 1][\"c\"]) if df[i][\"c\"] < df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_loss = [sum(losses[:lag]) / float(lag)]\n [avg_loss.append(((avg_loss[-1] * 13) + loss) / 14.0) for loss in losses[lag:]]\n return avg_loss\n\n gains = avg_gain()\n losses = avg_loss()\n\n raw_rsi = [\n round(100 - (100 / (1 + (gains[i] / losses[i]))), 2) for i in range(len(gains))\n ]\n df = df[-1 * len(raw_rsi) :]\n\n return [raw_rsi[i] for i in range(len(df))]", "def __calculate_rsi(self,df):\n df['RSI']=round(ta.rsi(df[Config.PRICE_COL]),2)\n\n return df", "def calc(self,newValue):\n idx=np.searchsorted(self.quantiles, newValue, side=\"left\")\n if idx>=self.n-1:\n return idx/self.n\n if np.abs(newValue - self.quantiles[idx-1]) < np.abs(newValue - self.quantiles[idx]):\n return (idx-1)/self.n\n else:\n return idx/self.n", "def eval(self, df):\n df_res = self.func(df)\n return df_res[self.out]", "def applyFilter(df: pandas.DataFrame, attribute:str, op: str, val: object) -> pandas.DataFrame: \n if (op == '='):\n return df[df[attribute] == val]\n elif (op == '<'):\n return df[df[attribute] < val]\n elif (op == '>'):\n return df[df[attribute] > val]\n elif (op == '<='):\n return df[df[attribute] <= val]\n elif (op == '>='):\n return df[df[attribute] >= val]\n elif (op == '!='):\n return df[df[attribute] != val]\n return df", "def GINE_helper(data):\n D_n = data.x[0].size(-1)\n D_e = data.edge_attr.size(-1)\n if D_n > D_e:\n lin = Linear(1, D_n)\n x = data.x\n edge_attr = lin(data.edge_attr)\n elif D_e > D_n:\n lin = Linear(1, D_e)\n x = lin(data.x)\n edge_attr = data.edge_attr\n\n return x, edge_attr", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def get_agent_data(self, attribute):\n\t\treturn getattr(self, attribute)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = 
db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def splitAttribute(self, atr, divider=0.5):\n big, lit = DecisionTree(None, self.atr), DecisionTree(None, self.atr)\n for d in self:\n if d[atr] > divider: big.append(d)\n else: lit.append(d)\n return lit, big", "def giniIndex(p_m1):\n G = p_m1*(1-p_m1)*2 \n return G", "def _split_dataset(self, X, y, label, index, value, sample_weights=None):\n # YOUR CODE HERE\n # Hint: Do not forget to remove the index-th feature from X.\n # begin answer\n ret1=[]\n ret2=[]\n featVec=X[:,index]\n X=X[:,[i for i in range(X.shape[1]) if i!=index ]]\n for i in 
range(len(featVec)):\n if featVec[i]>=value:\n ret1.append(i)\n else:\n ret2.append(i)\n sub1_X = X[ret1,:]\n sub1_y = y[ret1]\n label_1=label[ret1]\n sub1_sample_weights=sample_weights[ret1]\n sub2_X = X[ret2,:]\n sub2_y = y[ret2]\n label_2=label[ret2]\n sub2_sample_weights=sample_weights[ret2]\n # end answer\n return sub1_X, sub1_y, label_1, sub1_sample_weights, sub2_X, sub2_y, label_2, sub2_sample_weights", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def _getPMI(self, df, targetColname):\n pmi = 0\n search_term = df[targetColname]\n noofterms = len(search_term)\n startindex = 0\n pmiAccumulate = 0\n if(noofterms>1):\n for i in range(0,noofterms-1):\n pmi = self.computePMI(search_term[i],search_term[i+1])\n pmiAccumulate = pmiAccumulate+pmi\n pmiAccumulate = pmiAccumulate/noofterms\n pmi = pmiAccumulate\n return pmi", "def giniIndex(self, groups):\n n = sum([len(g) for g in groups])\n gini = 0.0\n for g in groups:\n if len(g) == 0: continue\n score = 0\n for c in self.classes:\n p = [r[-1] for r in g].count(c) / len(g)\n score += p * p\n gini += (1 - score) * len(g) / n\n return gini", "def getSplitAttr(self, data, attributes):\n splitAttrIndex = 0\n lengthAttr = len(attributes)\n del self.infoGain[:]\n index = 0\n while index < lengthAttr:\n self.infoGain.append(self.getInfoGain(data, index))\n index += 1\n\n for gain in self.infoGain:\n if gain == max(self.infoGain):\n break\n splitAttrIndex += 1\n return splitAttrIndex", "def example_run_function(run, df):\n\n df['fcreward'] = calc.glm_groups.fraction(run.parent, 'ensure-vdrive-plus')\n df['reward'] = calc.glm_groups.fraction(run.parent, 'ensure')\n\n return df", "def ivalue(self, idx):\n\n a = self.data[:,idx] / self.data[:,idx].sum()\n results = -(np.dot(a, np.log(a))) / np.log(len(self.data[:,idx]))\n return results", "def gini(values):\n v = values.copy()\n v.sort()\n\n sum_iy = 0\n for i, y in enumerate(v):\n i += 1\n sum_iy += i*y\n\n sum_y = sum(v)\n n = len(v)\n\n return 1 - (((2*sum_iy)/(n*sum_y)) - ((n+1)/n))", "def get_index(self, gi):\n for i in range(len(self.gradual_items)):\n gi_obj = self.gradual_items[i]\n if (gi.symbol == gi_obj.symbol) and (gi.attribute_col == gi_obj.attribute_col):\n return i\n return -1", "def test_split_cell_sets_new_tier_level(mock_amg):\n\n mock_amg.cells[0].split()\n\n assert mock_amg.cells[-4].tier == 1\n assert mock_amg.cells[-3].tier == 1\n assert mock_amg.cells[-2].tier == 1\n assert mock_amg.cells[-1].tier == 1\n\n mock_amg.cells[-1].split()\n assert mock_amg.cells[-4].tier == 2\n assert mock_amg.cells[-3].tier == 2\n assert mock_amg.cells[-2].tier == 2\n assert mock_amg.cells[-1].tier == 2", "def gbids(self, method=\"median\", pattr=None, **kwargs):\n import gblearn.selection as sel\n from functools import partial\n methmap = {\n \"median\": sel.median,\n 
\"cna\": partial(sel.cna_max, coord=0),\n\t \"cna_z\": partial(sel.cna_max, coord=2)\n }\n if method in methmap:\n extra = getattr(self, pattr) if pattr is not None else None\n return methmap[method](self.xyz, extra, types=self.types, **kwargs)", "def eval_iou_spacenet_csv(self, miniou=0.5, iou_field_prefix=\"iou_score\",\n imageIDField=\"ImageId\", debug=False, min_area=0):\n # Get List of all ImageID in both ground truth and proposals\n imageIDList = []\n imageIDList.extend(list(self.ground_truth_GDF[imageIDField].unique()))\n if not self.proposal_GDF.empty:\n imageIDList.extend(list(self.proposal_GDF[imageIDField].unique()))\n imageIDList = list(set(imageIDList))\n iou_field = iou_field_prefix\n scoring_dict_list = []\n self.ground_truth_GDF[iou_field] = 0.\n iou_index = self.ground_truth_GDF.columns.get_loc(iou_field)\n id_cols = 2\n ground_truth_ids = self.ground_truth_GDF.iloc[:, :id_cols]\n\n for imageID in tqdm(imageIDList):\n self.ground_truth_GDF_Edit = self.ground_truth_GDF[\n self.ground_truth_GDF[imageIDField] == imageID\n ].copy(deep=True)\n self.ground_truth_GDF_Edit = self.ground_truth_GDF_Edit[\n self.ground_truth_GDF_Edit.area >= min_area\n ]\n proposal_GDF_copy = self.proposal_GDF[self.proposal_GDF[\n imageIDField] == imageID].copy(deep=True)\n proposal_GDF_copy = proposal_GDF_copy[proposal_GDF_copy.area\n > min_area]\n if debug:\n print(iou_field)\n for _, pred_row in proposal_GDF_copy.iterrows():\n if debug:\n print(pred_row.name)\n if pred_row.geometry.area > 0:\n pred_poly = pred_row.geometry\n iou_GDF = iou.calculate_iou(pred_poly,\n self.ground_truth_GDF_Edit)\n # Get max iou\n if not iou_GDF.empty:\n max_index = iou_GDF['iou_score'].idxmax(axis=0,\n skipna=True)\n max_iou_row = iou_GDF.loc[max_index]\n # Update entry in full ground truth table\n previous_iou = self.ground_truth_GDF.iloc[\n max_index, iou_index]\n new_iou = max_iou_row[iou_field]\n if new_iou > previous_iou:\n self.ground_truth_GDF.iloc[max_index, iou_index] \\\n = new_iou\n if max_iou_row['iou_score'] > miniou:\n self.proposal_GDF.loc[pred_row.name, iou_field] \\\n = max_iou_row['iou_score']\n self.ground_truth_GDF_Edit \\\n = self.ground_truth_GDF_Edit.drop(\n max_iou_row.name, axis=0)\n else:\n self.proposal_GDF.loc[pred_row.name, iou_field] = 0\n else:\n self.proposal_GDF.loc[pred_row.name, iou_field] = 0\n else:\n self.proposal_GDF.loc[pred_row.name, iou_field] = 0\n if debug:\n print(self.proposal_GDF.loc[pred_row.name])\n\n if self.proposal_GDF.empty:\n TruePos = 0\n FalsePos = 0\n else:\n proposal_GDF_copy = self.proposal_GDF[\n self.proposal_GDF[imageIDField] == imageID].copy(deep=True)\n proposal_GDF_copy = proposal_GDF_copy[\n proposal_GDF_copy.area > min_area]\n if not proposal_GDF_copy.empty:\n if iou_field in proposal_GDF_copy.columns:\n TruePos = proposal_GDF_copy[\n proposal_GDF_copy[iou_field] >= miniou].shape[0]\n FalsePos = proposal_GDF_copy[\n proposal_GDF_copy[iou_field] < miniou].shape[0]\n else:\n print(\"iou field {} missing\".format(iou_field))\n TruePos = 0\n FalsePos = 0\n else:\n print(\"Empty Proposal Id\")\n TruePos = 0\n FalsePos = 0\n\n # false negatives is the number of objects remaining in ground\n # truth after pulling out matched objects\n FalseNeg = self.ground_truth_GDF_Edit[\n self.ground_truth_GDF_Edit.area > 0].shape[0]\n if float(TruePos+FalsePos) > 0:\n Precision = TruePos / float(TruePos + FalsePos)\n else:\n Precision = 0\n if float(TruePos + FalseNeg) > 0:\n Recall = TruePos / float(TruePos + FalseNeg)\n else:\n Recall = 0\n if Recall * Precision > 0:\n 
F1Score = 2*Precision*Recall/(Precision+Recall)\n else:\n F1Score = 0\n\n score_calc = {'imageID': imageID,\n 'iou_field': iou_field,\n 'TruePos': TruePos,\n 'FalsePos': FalsePos,\n 'FalseNeg': FalseNeg,\n 'Precision': Precision,\n 'Recall': Recall,\n 'F1Score': F1Score\n }\n scoring_dict_list.append(score_calc)\n\n return scoring_dict_list", "def __info_gain_from_splits(self, potential_integer_splits, sorted_data):\n info_gains = []\n for split in map(int, potential_integer_splits):\n left_child = sorted_data[sorted_data[:, 0].astype(int) < split, :]\n right_child = sorted_data[sorted_data[:, 0].astype(int) >= split, :]\n info_gains.append(self.__calc_info_gain(sorted_data, left_child,\n right_child))\n return info_gains", "def getSplitDetectorSignal(self):\r\n\t\treturn self.splitData", "def test_gini_coeff():\n n = 10000\n\n # Tests Pareto: G = 1 / (2*a - 1)\n a = np.random.randint(2, 15)\n expected = 1 / (2 * a - 1)\n\n y = (np.random.pareto(a, size=n) + 1) * 2\n coeff = gini_coefficient(y)\n assert_allclose(expected, coeff, rtol=1e-01)\n\n # Tests Weibull: G = 1 - 2**(-1/a)\n a = np.random.randint(2, 15)\n expected = 1 - 2 ** (-1 / a)\n\n y = np.random.weibull(a, size=n)\n coeff = gini_coefficient(y)\n assert_allclose(expected, coeff, rtol=1e-01)", "def isi(spiketrain, axis=-1):\n if isinstance(spiketrain, neo.SpikeTrain):\n intervals = np.diff(spiketrain.magnitude, axis=axis)\n # np.diff makes a copy\n intervals = pq.Quantity(intervals, units=spiketrain.units, copy=False)\n else:\n intervals = np.diff(spiketrain, axis=axis)\n if (intervals < 0).any():\n warnings.warn(\"ISI evaluated to negative values. \"\n \"Please sort the input array.\")\n\n return intervals" ]
[ "0.6597128", "0.6406698", "0.62779886", "0.6191937", "0.567172", "0.5599756", "0.5579262", "0.5560474", "0.53938425", "0.5372687", "0.5327218", "0.5243013", "0.5231648", "0.52305394", "0.50356853", "0.5020019", "0.49910983", "0.49304187", "0.49144533", "0.4894045", "0.48931786", "0.48574132", "0.48529804", "0.4814904", "0.4783039", "0.47676802", "0.47244725", "0.47196734", "0.47153467", "0.4714126", "0.4712734", "0.4711269", "0.4707705", "0.47068396", "0.46986854", "0.46937746", "0.46842182", "0.46717668", "0.46695092", "0.4667205", "0.4643798", "0.46283463", "0.46245325", "0.46223816", "0.46196648", "0.4615898", "0.46027634", "0.45930463", "0.45801324", "0.45754015", "0.45664796", "0.4565863", "0.45481327", "0.4545465", "0.45424223", "0.45395684", "0.45299995", "0.45272672", "0.45261878", "0.45030764", "0.44968453", "0.4493119", "0.44852442", "0.4470087", "0.4451736", "0.4440798", "0.44362655", "0.4435005", "0.44290313", "0.4428991", "0.44192794", "0.4417548", "0.4414386", "0.44133022", "0.44099998", "0.440851", "0.4394935", "0.4390189", "0.43853766", "0.43674916", "0.436684", "0.43622068", "0.43614104", "0.4353846", "0.4347268", "0.43462268", "0.43460256", "0.4342181", "0.43345642", "0.4331248", "0.43298328", "0.4318141", "0.4302419", "0.42981273", "0.42896748", "0.42873397", "0.42713046", "0.4266367", "0.42641234", "0.42592072" ]
0.77684784
0
Write a decorator that prints UPPER_SLICE and LOWER_SLICE before and after calling the function (func) that is passed in (@functools.wraps(func) is to preserve the original func's docstring)
def sandwich(func):
    @functools.wraps(func)
    def wrapped_decorator(*args, **kwargs):
        print(UPPER_SLICE)
        arguments = func(*args, **kwargs)
        print(LOWER_SLICE)
        return arguments
    return wrapped_decorator
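A minimal usage sketch of the decorator above, assuming `import functools` and module-level UPPER_SLICE / LOWER_SLICE string constants (the constant values and the decorated `add` function below are illustrative, not taken from the record):

import functools

UPPER_SLICE = "---- upper slice ----"   # illustrative value; the real constant is assumed to be defined elsewhere
LOWER_SLICE = "---- lower slice ----"   # illustrative value

def sandwich(func):
    @functools.wraps(func)
    def wrapped_decorator(*args, **kwargs):
        print(UPPER_SLICE)
        arguments = func(*args, **kwargs)
        print(LOWER_SLICE)
        return arguments
    return wrapped_decorator

@sandwich
def add(a, b):
    """Return the sum of a and b."""
    return a + b

print(add(2, 3))     # prints UPPER_SLICE, then LOWER_SLICE, then 5
print(add.__doc__)   # "Return the sum of a and b." -- docstring preserved by functools.wraps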
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wraps():\n print('func')", "def trace(func):\n @wraps(func)\n def tracer(*args, **kwargs):\n name = func.__name__\n stack_size = int(len(inspect.stack(0)) / 2) # @wraps(func) is also increasing the size\n indent = stack_size*'\\t'\n print(f'{indent} > Entering \"{name}\": args: {args}')\n result = func(*args, **kwargs)\n print(f'{indent} < Leaving \"{name}\"')\n return result\n\n return tracer", "def trace(filler):\n @decorator\n def dec(func):\n def wrapper(*args, **kwargs):\n indent = filler * wrapper.level\n arguments = ', '.join(str(x) for x in args)\n print('{} --> {}({})'.format(indent, func.__name__, arguments))\n wrapper.level += 1\n\n result = func(*args, **kwargs)\n print('{} <-- {}({}) == {}'.format(indent, func.__name__, arguments, result))\n wrapper.level -= 1\n return result\n wrapper.level = 0\n return wrapper\n return dec", "def speak(word='moo'):\n def decorator(func):\n def decorated(*args, **kwargs):\n print word\n return func(*args, **kwargs)\n return decorated\n return decorator", "def func_doc():", "def decor(func):\n def wrap():\n print(\"@@@ STATISTICS REPORT START @@@\\n\")\n func()\n print(\"@@@ STATISTICS REPORT FINISH @@@\\n\")\n return wrap", "def decorator(func):\n\n pass", "def __call__(self, func):\n func.__doc__ = self.doc\n return func", "def outer(func):\n print(\"1This is outer function\\n\", outer.__name__, outer.__doc__)\n\n # @wraps(func)\n def inner():\n \"\"\"\n 2.This is inner function doc\n \"\"\"\n print(\"2This is inner function\\n\", inner.__name__, inner.__doc__)\n func()\n\n return inner", "def subfunc(func, *args, **kwargs):\n out = partial(func, *args, **kwargs)\n\n # update the name\n out.__name__ = func.__name__ + \"\".join(\"_{}\".format(val) for val in kwargs.values())\n\n # update the docstring\n out.__doc__ = \"Apply {name} with kwargs {kw}.\\n\\nDocstring for {name}:\\n\\n{doc}\".format(\n name=func.__name__, kw=kwargs, doc=func.__doc__\n )\n\n return out", "def decorated(origFunc, newFunc, decoration='None'):\n\n pass", "def shown(func):\n name = f\"{func.__name__}( )\"\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n res = func(*args, **kwargs)\n res = show(**{name: res})\n return res\n return wrapped_func", "def undoc(func):\n return func", "def test_decorator_works_the_same_as_explicit_calling(self):\n @required_parameters('arg1')\n def _func1_decorated(arg1=None, arg2=None, arg3=None):\n \"\"\"This is my docstring\"\"\"\n pass\n\n def _func2_undecorated(arg1=None, arg2=None, arg3=None):\n \"\"\"This is my docstring\"\"\"\n pass\n _func2_decorated = required_parameters('arg1')(_func2_undecorated)\n\n # Check that the decorated function gets the correct docstring\n self.assertEqual(_func1_decorated.__doc__, 'This is my docstring')\n\n # Check that both functions have the same docstring\n self.assertEqual(_func1_decorated.__doc__, _func2_decorated.__doc__)", "def decorator(func):\n\t\treturn push_aspect(name or func.__name__, func)", "def logcalls(prefix):\r\n # whitespace between arguments? 
__repr__\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def inner(*args, **kwargs):\r\n arg_all_str = \",\".join([repr(arg) for arg in args] + [item[0] + \"=\" + repr(item[1])for item in kwargs.items()])\r\n sys.stderr.write(prefix + \": \" + func.__name__ + \"(\" + arg_all_str + \")\")\r\n sys.stderr.write(\"\\n\")\r\n sys.stderr.write(prefix + \": \" + func.__name__ + \" -> \" + repr(func(*args, **kwargs)))\r\n return func(*args, **kwargs)\r\n return inner\r\n return decorator", "def func_decorator(fun):\r\n count = 0\r\n\r\n def wrapper(*args, **kwargs):\r\n try:\r\n nonlocal count\r\n count += 1\r\n start = time.time()\r\n with contextlib.redirect_stdout(io.StringIO()) as f: fun(*args)\r\n duration = time.time() - start\r\n print(f'{fun.__name__}' + f' call {count}' + ' executed in ' + f'{duration}' + ' sec')\r\n print('Name: ' + f' {fun.__name__}')\r\n print('Type: ' + f' {type(fun)}')\r\n sig = signature(fun)\r\n print('Sign: ' + f' {sig}')\r\n print('Args: ' + ' positional ' + f'{args}' '\\n\\t key=worded ' + f'{kwargs}')\r\n doc = fun.__doc__\r\n doc = doc.splitlines()[1:-1]\r\n doc = '\\n\\t'.join(map(str, doc))\r\n print('Doc:' + f'{doc}')\r\n source = inspect.getsource(fun)\r\n source = source.splitlines()\r\n source = '\\n\\t\\t'.join(map(str, source))\r\n print('Source: ' + f'{source}')\r\n output = f.getvalue().splitlines()\r\n output = '\\n\\t\\t'.join(map(str, output))\r\n print('Output: ' + f'{output}')\r\n\r\n except:\r\n logging.exception(f'timestamp: {datetime.now()}')\r\n pass\r\n\r\n return wrapper", "def click_doc(arg):\n import inspect\n\n def decorator(function):\n if type(arg) is str:\n function.__doc__ = arg\n elif inspect.isclass(arg):\n function.__doc__ = arg.__doc__\n else:\n function.__doc__ = None\n return function\n\n return decorator", "def wrapper_fun(*args):\n print(\"Hello Decorator\")\n return fun(*args)", "def doc_apply(doc):\n\n def wrapper(func):\n func.__doc__ = doc\n return func\n\n return wrapper", "def _concatenate_docstrings(func):\n # NOTE: Originally had idea to use numpydoc.docscrape.NumpyDocString to\n # interpolate docstrings but *enormous* number of assupmtions would go into\n # this. And simple is better than complex.\n # Get matplotlib axes func\n name = func.__name__\n orig = getattr(maxes.Axes, name)\n odoc = inspect.getdoc(orig)\n if not odoc: # should never happen\n return func\n\n # Prepend summary and potentially bail\n # TODO: Does this break anything on sphinx website?\n fdoc = inspect.getdoc(func) or '' # also dedents\n regex = re.search(r'\\.( | *\\n|\\Z)', odoc)\n if regex:\n fdoc = odoc[:regex.start() + 1] + '\\n\\n' + fdoc\n if rc['docstring.hardcopy']: # True when running sphinx\n func.__doc__ = fdoc\n return func\n\n # Obfuscate signature by converting to *args **kwargs. Note this does\n # not change behavior of function! 
Copy parameters from a dummy function\n # because I'm too lazy to figure out inspect.Parameters API\n # See: https://stackoverflow.com/a/33112180/4970632\n dsig = inspect.signature(lambda *args, **kwargs: None)\n fsig = inspect.signature(func)\n func.__signature__ = fsig.replace(parameters=tuple(dsig.parameters.values()))\n\n # Concatenate docstrings and copy summary\n # Make sure different sections are very visible\n pad = '=' * len(name)\n doc = f\"\"\"\n ================================{pad}\n proplot.axes.Axes.{name} documentation\n ================================{pad}\n {fdoc}\n ==================================={pad}\n matplotlib.axes.Axes.{name} documentation\n ==================================={pad}\n {odoc}\n \"\"\"\n func.__doc__ = inspect.cleandoc(doc) # dedents and trims whitespace\n\n # Return\n return func", "def partial(func, *args, docs='', **keywords):\n @wraps(func)\n def wrapper(*fargs, **fkeywords):\n newkeywords = keywords.copy()\n newkeywords.update(fkeywords)\n return func(*(args + fargs), **newkeywords)\n wrapper.func = func\n wrapper.args = args\n wrapper.keywords = keywords\n wrapper.docs = docs\n return wrapper", "def logged(func):\n def wrapper(*args, **kwargs):\n print(’you called {.__name__}({}{}{})’.format(\n func,\n str(list(args))[1:-1], \n ’, ’ if kwargs else ’’,\n ’, ’.join(’{}={}’.format(*pair) for pair in kwargs.items()),\n ))\n val = func(*args, **kwargs)\n print(’it returned’, val)\n return val", "def _mayus(func):\r\n\t\tdef decorator(self, key, *args):\r\n\t\t\treturn func(self, str(key).upper(), *args)\r\n\t\treturn decorator", "def debug(func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n if args and not kwargs:\n print(\"~ input of {}: args: {}\".format(func.__name__, args))\n elif not args and kwargs:\n print(\"~ input of {}: kwargs: {}\".format(func.__name__, kwargs))\n elif args and kwargs:\n print(\"~ input of {}: args: {}, kwargs: {}\".format(func.__name__, args, kwargs))\n else:\n print(\"~ input of {}: NO_ARGS\".format(func.__name__))\n output = func(*args, **kwargs) # stores the result of the function\n print(\"~ output of {}:\".format(func.__name__), output)\n return output\n\n return decorated", "def debugargs(prefix='***'):\n def debug(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(prefix + ': ' + func.__qualname__)\n return func(*args, **kwargs)\n return wrapper\n return debug", "def print_log(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n n = func.__name__\n print('{} has started with arguments:\\n{}\\n{}'.format(\n n, args, kwargs))\n res = func(*args, **kwargs)\n print('{} has finished and returned: {}'.format(\n n, res))\n return res\n\n return wrapper", "def dumpArgs(func):\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item)\n for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n result = func(*args, **kwargs)\n print(f'{func.__module__}.{func.__qualname__} Return Result: \\n {result}')\n return result\n return wrapper", "def f_Dumpfname(func):\n @wraps(func)\n def echo_func(*func_args, **func_kwargs):\n if DEBUG: print('func \\033[1;31m {}()\\033[0m called by \\033[1;31m{}() \\033[0m'.format(func.__name__,sys._getframe(1).f_code.co_name))\n return func(*func_args, **func_kwargs)\n return echo_func", "def print_timing(func):\n def wrapper(*arg):\n t1 = time.time()\n res = func(*arg)\n t2 = time.time()\n print '%s took %0.3f ms' % 
(func.func_name, (t2-t1)*1000.0)\n return res\n return wrapper", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def _add_doc(func, doc):\r\n func.__doc__ = doc", "def info(func):\n\n def decorated(*args, **kwargs):\n r\"\"\"Decorated method.\"\"\"\n runLog.info(func(*args, **kwargs))\n\n return decorated", "def wrap_in_func(self, func, *cols):\n return '{func}({args})'.format(func=func,\n args=', '.join(cols))", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def _add_doc(func, doc):\n func.__doc__ = doc", "def printHelpFunc(self, func, leadingstrings=None):\n if leadingstrings is None:\n leadingstrings = ['- ', ' ']\n a, idx = 0, None\n for line in func.__doc__.split('\\n'):\n if len(line) == 0:\n continue\n if idx is None:\n idx = len(line) - len(line.lstrip(' '))\n if len(line) == idx:\n continue\n print(leadingstrings[a] + line[idx:])\n a = 1", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n res = func(*args, **kwargs)\n print(func.__name__, args, kwargs)\n return res\n return wrapper", "def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_", "def custom_func():\n print(\"3This is custom function\\n\", custom_func.__name__, custom_func.__doc__)", "def double_rapper(func):\n @wraps(func)\n def rapper(*args, **kwargs):\n print('I am going to run {}'.format(func.__name__))\n func(*args, **kwargs)\n print('{} finished'.format(func.__name__))\n return rapper", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def humanized(humanized_func, **humanized_func_kwargs):\n def decorator(func):\n if isinstance(func, property):\n func = func.fget\n\n def _humanized_func(*args, **kwargs):\n return humanized_func(*args, **kwargs, **humanized_func_kwargs)\n func.humanized = _humanized_func\n return func\n return decorator", "def describe(self, *args, **kwargs):\n def _autodoc(func, *_args, **_kwargs):\n if len(_args) > 0:\n #: Instance or class method.\n response = func(_args[0])\n else:\n #: Function.\n if len(_kwargs) > 0:\n response = func(**_kwargs)\n else:\n response = func()\n\n self.parse(args[0], response)\n\n return func\n\n return decorator(_autodoc)", "def dump_args(func):\n\n def wrapper(*args, **kwargs):\n func_args = inspect.signature(func).bind(*args, **kwargs).arguments\n func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())\n log(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')\n return func(*args, **kwargs)\n\n return wrapper", "def wrapper(*args, **kwargs):\n print(f\"you are about 
to call {fn.__name__}\")\n print(f\"Here's the documentation: {fn.__doc__}\")\n return fn(*args, **kwargs)", "def print_args(func):\n def new_func(*args, **kwargs):\n print(args)\n print(kwargs)\n return func(*args, **kwargs)\n return new_func", "def decorator(func,*args,**kwargs):\n\t@functools.wraps(func)\n\tdef inner(*args,**kwargs):\n\t\t\"\"\" Uses functools.wraps to keep from showing this in the help(func) \"\"\"\n\t\tret = func(*args,**kwargs)\n\t\ttry:\n\t\t\tcookies = ret.cookies\n\t\t\tprint(cookies)\n\t\t\treturn(ret)\n\t\texcept:\n\t\t\treturn(ret)\n\treturn(inner)", "def wrapper(*args, **kwargs):\n print(f\"you are calling the {fn.__name__} function\")\n print(f\"Here's the documentation: {fn.__doc__}\")\n return fn(*args, **kwargs)", "def string_io(func):\n\t@wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\tRedirectSysStdOut.redirect_string_io() # 重定向标准输出\n\t\tresult = func(*args, **kwargs)\n\t\t\n\t\tfit_info = RedirectSysStdOut.redirect_undo()\n\t\tprint fit_info\n\t\t\n\t\treturn fit_info,result\n\treturn wrapper", "def important(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.important(func(*args, **kwargs))\n\n return decorated", "def info(arg1, arg2):\n print('Decorator arg1 = ' + str(arg1))\n print('Decorator arg2 = ' + str(arg2))\n\n def the_real_decorator(function):\n \"\"\"Decorator function\"\"\"\n def wrapper(*args, **kwargs):\n \"\"\"Decorator wrapper - the actual decorator\"\"\"\n print('S: Function {} args: {} kwargs: {}'.format(\n function.__name__, str(args), str(kwargs)))\n return function(*args, **kwargs)\n\n return wrapper\n\n return the_real_decorator", "def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args] # 1\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()] # 2\n signature = \", \".join(args_repr + kwargs_repr) # 3\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"Returning {func.__name__!r}: {value!r}\") # 4\n return value\n\n return wrapper_debug", "def annotate(func:Callable, quiet:Optional[bool]):\n name = func.__name__\n @wraps(func)\n def inner(*args, **kwargs):\n _default_recorder.start(name)\n try:\n return func(*args, **kwargs)\n finally:\n _default_recorder.end(name, quiet)\n return inner", "def decorator(func):\n return survey_started(require(numbers)(set_navbar(minutes)(func)))", "def moo(func):\n def decorated(*args, **kwargs):\n print 'moo'\n return func(*args, **kwargs) # Run decorated function.\n return decorated", "def instrument(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n arg_str = ', '.join(str(a) for a in args)\n if instrument.TRIM_ARGS is not None and len(arg_str) > instrument.TRIM_ARGS:\n arg_str = arg_str[:instrument.TRIM_ARGS] + \" ...\"\n if instrument.SHOW_CALL:\n sys.stderr.write(\" \"*wrapper._depth + \"call to \" + f.__name__ + \": \" + arg_str + \"\\n\")\n wrapper._count += 1\n wrapper._depth += 1\n wrapper._max_depth = max(wrapper._depth, wrapper._max_depth)\n result = f(*args, **kwargs)\n wrapper._depth -= 1\n res_str = str(result)\n if instrument.TRIM_RET is not None and len(res_str) > instrument.TRIM_RET:\n res_str = res_str[:instrument.TRIM_RET] + \" ...\"\n if instrument.SHOW_RET:\n sys.stderr.write(\" \"*wrapper._depth + f.__name__ + \" returns: \" + res_str + \"\\n\")\n return result\n wrapper._count = 0\n wrapper._depth = 0\n wrapper._max_depth = 0\n return wrapper", "def inherit_docstring_from(cls):\n def _doc(func):\n cls_docstring = getattr(cls, 
func.__name__).__doc__\n func_docstring = func.__doc__\n if func_docstring is None:\n func.__doc__ = cls_docstring\n else:\n new_docstring = func_docstring % dict(super=cls_docstring)\n func.__doc__ = new_docstring\n return func\n return _doc", "def cytest(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n bound = inspect.signature(func).bind(*args, **kwargs)\n return func(*bound.args, **bound.kwargs)\n\n return wrapped", "def docstring(func):\n try:\n lines = func.__doc__.strip().split(\"\\n\")\n return [line.strip() for line in lines]\n except AttributeError:\n return None", "def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator", "def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator", "def test_user_func_docstrings(self):\n for func in self.student_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def debug(func):\n if VERBOSE > 0:\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n\n print(f\"Calling {func.__name__}({signature})\\n\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\\n\")\n\n return value\n\n return wrapper_debug\n else:\n return func", "def debug_decorator(func):\n @functools.wraps(func)\n def debug_wrapper(*args, **kwargs):\n if config.function_debug:\n bound_arguments = inspect.signature(func).bind(*args, **kwargs)\n bound_arguments.apply_defaults()\n\n debug_string = [\"Calling {} with arguments:\".format(func.__name__)]\n\n for key, value in bound_arguments.arguments.items():\n debug_string.append(\"{} = {}\".format(key, value))\n\n debug_string = \"\\n\".join(debug_string)\n logger.debug(debug_string)\n\n result = func(*args, **kwargs)\n\n logger.debug(\"{} returns {}\".format(func.__name__, result))\n \n else:\n result = func(*args, **kwargs)\n\n 
return result\n\n return debug_wrapper", "def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug", "def func_custom(fname):\n def decorator(f):\n def decorated(*idp, **kwp):\n global tracer_data\n if hasattr(tracer_data, 'enabled') and tracer_data.enabled:\n try:\n call(fname)\n return f(*idp, **kwp)\n finally:\n ret()\n else:\n return f(*idp, **kwp)\n return decorated\n return decorator", "def old_function_with_docstring(x, y):\n return x + y", "def debug(func):\n\t@functools.wraps(func)\n\tdef wrapper_debug(*args, **kwargs):\n\t\targs_repr = [repr(a) for a in args] \n\t\tkwargs_repr = [f\"{k}={v}\" for k, v in kwargs.items()] \n\t\tsignature = \", \".join(args_repr + kwargs_repr) \n\n\t\tprint(f\"Calling {func.__name__} ({signature})\")\n\n\t\tvalue = func(*args, **kwargs)\n\t\tprint(f\"{func.__name__!r} returned {value!r}\") \n\t\t\n\t\treturn value\n\n\treturn wrapper_debug", "def arg_wrapper(func):\n\n @functools.wraps(func)\n def wrapper(*args,\n **kwargs):\n\n \"\"\" Simple profiler for the function.\n\n :param args: Args for the function.\n :param kwargs: Kwargs for the function.\n :return: The result of the function.\n \"\"\"\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result\n\n # Return the decorated function\n return wrapper", "def debug(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n\n return wrapper_debug", "def deprecated_inner(func):\n def wrapper(*args, **kwargs):\n \"\"\" wrapper for deprecated decorator\n \"\"\"\n logger.warn(\"Deprecated function {0}. 
Please use '{1}' instead.\".format(func.__name__, use_instead))\n return func(*args, **kwargs)\n wrapper.__name__ = func.__name__\n wrapper.__doc__ = func.__doc__\n wrapper.__dict__.update(func.__dict__)\n return wrapper", "def inspect(decorated_function):\n def wrapper(*args, **kwargs):\n \"\"\"Wrapper function that adds functionality to decorated function\"\"\"\n print('Before function')\n value = decorated_function(*args, **kwargs)\n print('After function')\n return value\n return wrapper", "def _summary(function):\n if not function.__doc__:\n return \"{}.\".format(function.__name__.capitalize())\n result = []\n for word in function.__doc__.split():\n result.append(word)\n if word.endswith(\".\"):\n break\n return \" \".join(result)", "def glue_lowering(*args):\n\n def wrap(fn):\n key = args[0]\n\n def real_call():\n glue = _overload_glue(args[0], typing_key=key)\n return glue.wrap_impl(*args[1:])(fn)\n\n _overload_glue.defer_lowering(key, real_call)\n return fn\n return wrap", "def info_decorator(func):\n\n def wrapper(*args, **kwargs):\n\n return func(*args, **kwargs)\n\n return wrapper", "def function_timer(orig_func):\n import time\n\n @wraps(orig_func)\n def wrapper(*args, **kwargs):\n t1 = time.time()\n result = orig_func(*args, **kwargs)\n t2 = time.time()\n print('{} ran in: {} sec'.format(orig_func.__name__, t2))\n return result\n\n return wrapper", "def dump_args_and_ret(func):\n fname = func.__name__\n\n def echo_func(*args, **kwargs):\n print(f\"{fname} args={args} kwargs={kwargs}\")\n ret = func(*args, *kwargs)\n print(f\"{fname} args={args} kwargs={kwargs} ret={ret}\")\n return ret\n return echo_func", "def with_timings(function):\n\n @functools.wraps(function)\n def decorator(*args, **kwargs):\n start_time = time.time()\n ret = function(*args, **kwargs)\n duration_secs = time.time() - start_time\n print(\n f\"== Index Call == {style(function.__name__, bold=True)}: \"\n f\"{duration_secs*1000}\",\n file=sys.stderr,\n flush=True,\n )\n return ret\n\n return decorator", "def copy_docstring(other):\n\n def wrapper(func):\n func.__doc__ = other.__doc__\n return func\n\n return wrapper", "def format_partial(func: Callable, verbose: bool = False) -> str:\n fname = func.__qualname__ if hasattr(func, '__qualname__') else str(func)\n arg_str = \", \".join([repr(a) for a in func.args])\n kwargs_str = \", \".join([str(k)+\":\"+repr(v)\n for k, v in func.keywords.items()])\n if verbose:\n repr_ = f\"<partial {fname}:{arg_str} {kwargs_str}>\"\n else:\n repr_ = f\"<partial {fname}>\"\n return repr_", "def desc(text):\n def _decoration(fcn):\n fcn.desc = text\n return fcn\n return _decoration", "def timed(func):\n\n @functools.wraps(func)\n def wrap(*args, **kwargs):\n start = time.perf_counter()\n func(*args, **kwargs)\n stop = time.perf_counter()\n print(f'{func.__name__} executed in {stop - start}s')\n\n return wrap", "def _debug_wrap(func):\n\n def wrapper(*args, **kwargs):\n _debug_print(f\"{datetime.datetime.now()} - About to run: {func.__name__}\")\n ret_val = func(*args, **kwargs)\n _debug_print(f\"{datetime.datetime.now()} - Completed run: {func.__name__}\")\n return ret_val\n\n return wrapper", "def fmt_rust_function(func: Callable) -> str:\n return f\"{func.__module__}:{func.__code__.co_firstlineno}:{func.__name__}\"", "def test_function_doc_string():\n functions = inspect.getmembers(session10, inspect.isfunction)\n for function in functions:\n assert function[1].__doc__", "def stringfilter(func):\n @wraps(func)\n def _dec(*args, **kwargs):\n if args:\n args = list(args)\n 
args[0] = str(args[0])\n return func(*args, **kwargs)\n\n return _dec", "def timeit(func_to_decorate):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func_to_decorate(*args, **kwargs)\n elapsed = ((time.time() - start) /60 )\n log.debug(\"[TIMING]: %s - %s minutos\" % (func_to_decorate.__name__, elapsed))\n print(\"[TIMING]: %s - %s minutos\" % (func_to_decorate.__name__, round(elapsed, 2)))\n print(\"********************************* fin ********************************\")\n return result\n\n wrapper.__doc__ = func_to_decorate.__doc__\n wrapper.__name__ = func_to_decorate.__name__\n return wrapper", "def prep_logging_decorator(func):\n filename = filename_regexp.match(inspect.getmodule(inspect.stack()[1][0]).__file__).group(1)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n logger.info('{}: Start preparation'.format(filename))\n func(*args, **kwargs)\n logger.info('{}: Finished preparation'.format(filename))\n return wrapper", "def timing(func):\n @wraps(func)\n def wrap(*args, **kw):\n start = time()\n result = func(*args, **kw)\n end = time()\n print(f\"{func.__name__} took: {end-start}:2.4f sec\\n\")\n return result\n return wrap", "def _function_name(func):\n return \"Calling the function: def {}()\".format(func.__name__)", "def test_markers(f):\n @wraps(f)\n def wrapper(*args, **kwds):\n Print._test_start(f.__name__)\n ret = f(*args, **kwds)\n Print._test_stop(f.__name__)\n return ret\n return wrapper", "def log_func_edges(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Generic wrapper function.\"\"\"\n logging.info(f\"Entering `{func.__name__}` for processing...\")\n results = func(*args, **kwargs)\n logging.info(f\"Exiting processing for `{func.__name__}`\")\n return results\n\n return wrapper", "def hldit(func):\n @wraps(func)\n def inner(*args, **kwargs):\n t1 = timeit.default_timer()\n result = func(*args, **kwargs)\n t2 = timeit.default_timer()\n dt = format_timespan(t2-t1)\n click.secho(f'\\n{func.__name__} finished in {dt}', fg='cyan')\n return result\n\n return inner", "def debug_decorator(func):\n\n def wrapper(*args, **kwargs):\n\n from main_loop import debug_mode\n\n if debug_mode:\n\n game_logger.logging.debug(\"Function name: \" + func.__name__)\n\n game_logger.logging.debug(\"Args: \")\n game_logger.logging.debug(args)\n\n game_logger.logging.debug(\"Kwargs: \")\n game_logger.logging.debug(kwargs)\n\n return func(*args, **kwargs)\n\n return wrapper", "def print_title( title, decorators ):\n decorators = \"*\" * decorators\n print \"\\n%s %s: %s\\n\" % ( decorators, title, decorators )", "def print_messages(start_message: str, end_message: str):\n def _print_messages(func):\n def new_func(*args, **kwargs):\n print(start_message)\n result = func(*args, **kwargs)\n print(end_message)\n return result\n return new_func\n return _print_messages", "def __call__(s, func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n with s:\n return func(*args, **kwargs)\n return inner" ]
[ "0.59775233", "0.59042966", "0.58600795", "0.58352566", "0.58184", "0.5748021", "0.5739565", "0.5706729", "0.56139064", "0.55796224", "0.5578859", "0.5534652", "0.5532867", "0.5523652", "0.55230683", "0.55105054", "0.54960775", "0.5491101", "0.5476996", "0.54521424", "0.5421226", "0.54205894", "0.5416279", "0.541566", "0.5415434", "0.5414294", "0.53823316", "0.5376243", "0.53682345", "0.53658664", "0.5364335", "0.5364335", "0.5364335", "0.5363535", "0.5361136", "0.53589463", "0.53499", "0.5346358", "0.53444487", "0.53355527", "0.5335206", "0.5335168", "0.5323409", "0.52885234", "0.52459913", "0.5237354", "0.52340394", "0.5229718", "0.5227269", "0.52154267", "0.51986414", "0.5195617", "0.51913846", "0.51786494", "0.51595795", "0.51560843", "0.51501316", "0.5137021", "0.5129895", "0.5122412", "0.51210296", "0.511157", "0.5103297", "0.5103297", "0.5100224", "0.50909036", "0.5086588", "0.508489", "0.50726223", "0.506753", "0.5063905", "0.5050378", "0.50444204", "0.50443053", "0.50337774", "0.502886", "0.50214005", "0.50198656", "0.50146705", "0.5014233", "0.500414", "0.5002944", "0.5002442", "0.49862105", "0.4985434", "0.49801072", "0.49759865", "0.49664256", "0.49660498", "0.49583143", "0.49581248", "0.49574357", "0.4944649", "0.4933977", "0.4931947", "0.49269292", "0.49182037", "0.4914639", "0.49137658", "0.49108088" ]
0.8172087
0
Generates graph of connected peers. Change to make this customisable.
def generate_graph(self):
    temp_graph = [[] for i in xrange(Parameters.num_peers)]
    unconnected = set([i for i in xrange(Parameters.num_peers)])
    while len(unconnected) > 1:
        node1 = random.sample(unconnected, 1)[0]
        unconnected.remove(node1)
        node2 = random.sample(unconnected, 1)[0]
        temp_graph[node2].append(self.nodes[node1])
        temp_graph[node1].append(self.nodes[node2])
    unconnected = set([i for i in xrange(Parameters.num_peers)])
    i = 0
    for i in xrange(Parameters.num_peers*Parameters.num_neighbours/2-Parameters.num_peers):
        a = random.sample(unconnected, 1)[0]
        b = random.sample(unconnected, 1)[0]
        while b == a:
            b = random.sample(unconnected, 1)[0]
        temp_graph[a].append(self.nodes[b])
        temp_graph[b].append(self.nodes[a])
    graph = {}
    for i in xrange(len(self.nodes)):
        graph["P_" + str(i)] = list(set(temp_graph[i]))
    return graph
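A minimal standalone sketch of the same two-phase idea, with num_peers and num_neighbours passed in as arguments so the topology is customisable (the function name, argument names, and the rng parameter below are illustrative assumptions, not taken from the source), could look like this:

import random

def generate_peer_graph(num_peers, num_neighbours, rng=random):
    # Sketch only: same two-phase construction as the snippet above,
    # but the sizes are explicit arguments instead of module globals.
    adjacency = {i: set() for i in range(num_peers)}

    # Phase 1: draw a peer, remove it from the pool, then link it to
    # another peer still in the pool, so the loop terminates.
    unconnected = set(range(num_peers))
    while len(unconnected) > 1:
        a = rng.sample(sorted(unconnected), 1)[0]
        unconnected.remove(a)
        b = rng.sample(sorted(unconnected), 1)[0]
        adjacency[a].add(b)
        adjacency[b].add(a)

    # Phase 2: add extra random edges so the total edge count is
    # roughly num_peers * num_neighbours / 2.
    extra_edges = max(num_peers * num_neighbours // 2 - num_peers, 0)
    for _ in range(extra_edges):
        a = rng.choice(range(num_peers))
        b = rng.choice(range(num_peers))
        while b == a:
            b = rng.choice(range(num_peers))
        adjacency[a].add(b)
        adjacency[b].add(a)

    # Same "P_<index>"-keyed mapping as the original returns.
    return {"P_%d" % i: sorted(adjacency[i]) for i in range(num_peers)}

# Example: 10 peers with roughly 4 neighbours each.
print(generate_peer_graph(10, 4))

Passing a seeded random.Random instance as rng makes the generated topology reproducible across runs.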
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.d[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def chain_graph(self) -> nx.DiGraph:\n edg_lst = [\n (f\"p{idx}\", f\"p{idx+1}\", self.a[f\"p{idx+1}\"]) for idx in range(self.n)\n ]\n chain_graph = nx.DiGraph()\n chain_graph.add_weighted_edges_from(edg_lst)\n return chain_graph", "def gen_graph(self):", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()", "def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges", "def connections(self, src=False, dst=True, params=True): \n conns = []\n if params:\n if src:\n #grab the node params that this node is a src to\n edges = self.parent.graph.out_edges(self, data=True) \n conns.extend([ edge[2][\"dst_param\"] for edge in edges ])\n if dst:\n #grab the node param that this node is a dst to\n edges = self.parent.graph.in_edges(self, data=True) \n conns.extend([ edge[2][\"src_param\"] for edge in edges ])\n else: \n if src:\n conns.extend(self.parent.graph.successors(self))\n if dst:\n conns.extend(self.parent.graph.predecessors(self))\n \n return conns", "def contract(self, cycle):\n # create a new id to represent the cycle in the resulting graph.\n new_id = Digraph.new_node_id\n Digraph.new_node_id += 1\n\n # we store links that cross into and out of the cycle in these maps. the\n # to_cycle map contains links reaching into the cycle, and is thus a map\n # from each target node in the cycle to a list of source nodes that\n # reach that target from outside the cycle. 
the from_cycle map contains\n # links going out from the cycle, and is thus a map from each source\n # node in the cycle to a list of target nodes outside the cycle.\n to_cycle = collections.defaultdict(list)\n from_cycle = collections.defaultdict(list)\n\n scores = {}\n labels = {}\n succs = collections.defaultdict(list)\n for source, target in self.iteredges():\n if source in cycle:\n if target not in cycle:\n from_cycle[target].append(source)\n elif target in cycle:\n # we know source is not in cycle from above.\n to_cycle[source].append(target)\n else:\n succs[source].append(target)\n succs[target]\n scores[source, target] = self.get_score(source, target)\n labels[source, target] = self.get_label(source, target)\n\n old_edges = collections.defaultdict(list)\n\n # for each target in our graph that's reachable from the cycle, add an\n # edge from our new node to that target, with an appropriate score.\n for target, sources in from_cycle.items():\n succs[new_id].append(target)\n max_score = -1e100\n max_source = None\n for s in sources:\n score = self.get_score(s, target)\n if score > max_score:\n max_score = score\n max_source = s\n old_edges[max_source].append(target)\n scores[new_id, target] = max_score\n labels[new_id, target] = self.get_label(max_source, target)\n\n # before we handle the to_cycle map, we need to build some convenience\n # information for the cycle -- total score, and predecessor edges.\n pred = {}\n cycle_score = 0\n for s, t in cycle.iteredges():\n pred[t] = s\n cycle_score += self.get_score(s, t)\n\n # for each source in our graph that reaches into the cycle, add an edge\n # from the source to our new node, with an appropriate edge score.\n for source, targets in to_cycle.items():\n succs[source].append(new_id)\n max_score = -1e100\n max_target = None\n for t in targets:\n score = self.get_score(source, t) - self.get_score(pred[t], t)\n if score > max_score:\n max_score = score\n max_target = t\n old_edges[source].append(max_target)\n scores[source, new_id] = cycle_score + max_score\n labels[source, new_id] = self.get_label(source, max_target)\n\n return (\n new_id,\n old_edges,\n Digraph(\n succs, lambda s, t: scores[s, t], lambda s, t: labels[s, t]\n ),\n )", "async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}", "def generate_connection_e(self,N_e):\n raise NotImplementedError", "def getPeers(self, peerType):\r\n raise NotImplementedError()", "def getPeerToPeerNetwork(self):\r\n raise NotImplementedError()", "def peers():\n return flask.jsonify(api_utils.get_peer_conf_and_state())", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def connect_nodes(self):\n for src_id, 
trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def graph(self):\n ...", "def build_graph(self):\n pass", "def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def FindEulerCycle(self):\n\n if self.IsEulerGraph():\n nodes_copy = [n for n in self.nodes]\n edges_copy = [e for e in self.edges]\n connections_copy = [(a, b) for (a, b) in self.connections]\n euler_cycle = list()\n\n starting_node_index = random.randint(1, len(self.nodes))\n starting_node = self.nodes[starting_node_index - 1]\n euler_cycle.append(starting_node.index)\n\n current_node = starting_node\n\n self.PrintGraph()\n\n while len(self.connections) != 0:\n for i, neighbour in enumerate(current_node.neighbours):\n if (not self.CheckIfConnectionIsABridge(neighbour, current_node.index)) or (i == (len(current_node.neighbours) - 1)):\n euler_cycle.append(neighbour)\n self.Disconnect(neighbour, current_node.index)\n current_node = self.nodes[neighbour - 1]\n break\n\n self.nodes = [n for n in nodes_copy]\n self.edges = [e for e in edges_copy]\n self.connections = [(a, b) for (a, b) in connections_copy]\n\n euler_cycle_readable_format = \"[\"\n for node in euler_cycle:\n euler_cycle_readable_format += \" {} -\".format(node)\n euler_cycle_readable_format = euler_cycle_readable_format[:-1] \n euler_cycle_readable_format += \"]\"\n\n return euler_cycle_readable_format", "def getConnectionsBetweenSuperPeers(self):\r\n raise NotImplementedError()", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if 
setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def build_graph(self):\n raise NotImplementedError", "def _build_graph(self):\n pass", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def populate_graph(self):", "def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def get_peer_nodes(self):\n return list(self.__peer_nodes)", "def get_peer_nodes(self):\n return list(self.__peer_nodes)", "def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp", "def get_relationships_for_peers_on_date(self, source_id, destination_id, from_, to):\n raise errors.Unimplemented()", "def _write_conne(parameters):\n from ._common import connections\n\n # Reorder connections\n if parameters[\"connections_order\"] is not None:\n order = parameters[\"connections_order\"]\n else:\n order = parameters[\"connections\"].keys()\n\n # Format\n label_length = len(max(parameters[\"connections\"], key=len)) // 2\n fmt = block_to_format[\"CONNE\"]\n fmt = str2format(fmt[label_length])\n\n out = []\n for k in order:\n data = deepcopy(connections)\n data.update(parameters[\"connections\"][k])\n\n values = [\n k,\n 
data[\"nseq\"],\n data[\"nadd\"][0] if data[\"nadd\"] is not None else None,\n data[\"nadd\"][1] if data[\"nadd\"] is not None else None,\n data[\"permeability_direction\"],\n data[\"nodal_distances\"][0],\n data[\"nodal_distances\"][1],\n data[\"interface_area\"],\n data[\"gravity_cosine_angle\"],\n data[\"radiant_emittance_factor\"],\n ]\n out += write_record(values, fmt)\n\n return out", "def create_graph_network_visualization(graph_network, connections, connections_grouped):\n\n edge_trace = go.Scatter(\n x=[],\n y=[],\n customdata=[],\n text=[],\n line=dict(width=2, color='#888'),\n hoverinfo='all',\n mode='lines+text',\n textposition='top left',\n )\n edge_label_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n textposition='top left',\n mode='markers+text',\n hoverinfo='none',\n marker=go.Marker(\n opacity=0\n ),\n textfont=dict(size=20, color='black')\n )\n\n for edge in graph_network.edges():\n x0, y0 = graph_network.node[edge[0]]['pos']\n x1, y1 = graph_network.node[edge[1]]['pos']\n edge_weight = graph_network.node[edge[1]]['pos']\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n text = graph_network[edge[0]][edge[1]]['weight']\n edge_label_trace['x'] += tuple([(x0 + x1) / 2])\n edge_label_trace['y'] += tuple([(y0 + y1) / 2])\n edge_label_trace['text'] += tuple([text])\n\n # writing to edge customdata\n edge_trace['customdata'] += graph_network[edge[0]][edge[1]]['weight']\n edge_trace['text'] = str(graph_network[edge[0]][edge[1]]['weight'])\n # edge_trace['marker']['size'] += professor_graph[edge[0]][edge[1]]['weight']\n # print(graph_network[edge[0]][edge[1]]['weight'])\n\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n hovertext=[],\n mode=\"markers+text\",\n hoverinfo='text',\n textposition='bottom center',\n marker=dict(\n showscale=False,\n # colorscale options\n # ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',\n # 'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',\n # 'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis]\n colorscale='YlGnBu',\n reversescale=True,\n color=[],\n size=40,\n colorbar=dict(\n thickness=15,\n title='Node Connections',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2))\n )\n\n entry_bool = True\n\n for node in graph_network.nodes():\n x, y = graph_network.node[node]['pos']\n node_trace['x'] += tuple([x])\n node_trace['y'] += tuple([y])\n # node_trace['text'].append(node)\n\n # x, y = professor_graph.node[node]['pos']\n # node_trace['x'].append(x)\n # node_trace['y'].append(y)\n\n if entry_bool:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n entry_bool = False\n total_projects = \"Total Projects: {}\".format(len(connections[\"Proposal Number:\"].unique()))\n print(\"Total Projects\", total_projects)\n node_trace['hovertext'] += tuple([total_projects])\n else:\n # node_trace['text'].append(node)\n node_trace['text'] += tuple([node])\n some_text = []\n some_text.append(node + \"<br>\")\n for i in range(len(connections_grouped.loc[node]['proposal_number'])):\n if i > 0:\n some_text.append(\"<br>\")\n print(\"list index is \", i)\n print(\"prop number is \", connections_grouped.loc[node]['proposal_number'][i])\n some_text.append(connections_grouped.loc[node]['proposal_number'][i])\n # import pdb\n # pdb.set_trace()\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['proposal_title'][i])\n some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['project_status'][i])\n 
some_text.append(\"<br>\")\n some_text.append(connections_grouped.loc[node]['institution'][i])\n some_text.append(\"<br>\")\n some_text = [x for x in some_text if str(x) != 'nan']\n\n some_text = \"\".join(some_text)\n print(node)\n print(\"yo is \", some_text)\n # node_trace['hovertext'].append(some_text)\n node_trace['hovertext'] += tuple([some_text])\n\n for node, adjacencies in enumerate(graph_network.adjacency_list()):\n # print(node,adjacencies)\n # print(professor_graph[node])\n node_trace['marker']['color'] += tuple([len(adjacencies)])\n\n return node_trace, edge_trace, edge_label_trace", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def reactions(self):\n for node in self.graph:\n if not isinstance(node, Molecule):\n yield node", "def connecting(node1, node2):\n comp_list = []\n \"\"\":type : list[components.Component]\"\"\"\n if node1 == node2:\n return []\n for comp in node1.connected_comps:\n if comp.neg == node2:\n comp_list.append(comp)\n elif comp.pos == node2:\n comp_list.append(comp)\n return comp_list", "def generate_networks(self):\n\n # Defines dictionary of residue interaction types to include as network\n # edges.\n #**N.B.** Might want to provide these interactions as a program input?\n # **N.B.** 'intra' in the interaction names dict refers to interactions\n # between residues in the same chain\n interactions = [['hb', 'hb_pairs', 'hb_pairs_fasta_intra'],\n ['nhb', 'nhb_pairs', 'nhb_pairs_fasta_intra'],\n ['plusminus2', 'minus_2', 'minus_2_fasta'],\n ['plusminus2', 'plus_2', 'plus_2_fasta'],\n ['plusminus1', 'minus_1', 'minus_1_fasta'],\n ['plusminus1', 'plus_1', 'plus_1_fasta'],\n ['vdw', 'van_der_waals', 'van_der_waals_fasta_intra']]\n\n # Initialises MultiGraph (= undirected graph with self loops and\n # parallel edges) network of interacting residues\n G = nx.MultiGraph()\n\n # Adds nodes (= residues) to MultiGraph, labelled with their side-chain\n # identity (initially set to unknown), z-coordinate, buried surface area\n # (sandwiches only) and whether they are edge or central strands\n # (sandwiches only).\n if self.barrel_or_sandwich == '2.40':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_coord = self.input_df['z_coords'][num]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n eoc='-', z=z_coord, phipsi=phi_psi_class)\n elif self.barrel_or_sandwich == '2.60':\n for num in 
range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_sandwich_coord = self.input_df['sandwich_z_coords'][num]\n #z_strand_coord = self.input_df['strand_z_coords'][num]\n #buried_surface_area = self.input_df['buried_surface_area'][num]\n edge_or_central = self.input_df['edge_or_central'][num][0:3]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n z=z_sandwich_coord,\n #zstrand=z_strand_coord, bsa=buried_surface_area,\n eoc=edge_or_central,\n phipsi=phi_psi_class)\n\n domain_res_ids = list(G.nodes())\n\n # Adds edges (= residue interactions) to MultiGraph, labelled by\n # interaction type. The interactions considered are defined in\n # interactions_dict.\n for int_list in interactions:\n edge_label = int_list[0]\n int_name = int_list[1]\n int_fasta = int_list[2]\n\n for num in range(self.input_df.shape[0]):\n res_1 = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n res_list = self.input_df[int_name][num]\n if type(res_list) != list:\n res_list = [res_list]\n\n for res_index, res_2 in enumerate(res_list):\n res_2 = self.input_df['domain_ids'][num] + res_2\n # Accounts for interactions between residue pairs where one\n # residue is in the beta-barrel/sandwich domain and the\n # other is within a loop region\n aa_id = self.input_df[int_fasta][num][res_index]\n if not res_2 in list(G.nodes()):\n G.add_node(res_2, type='loop', aa_id=aa_id)\n if aa_id != G.nodes()[res_2]['aa_id']:\n print(aa_id, G.nodes()[res_2]['aa_id'])\n raise ValueError(\n 'Identity of node {} is inconsistent according to '\n 'the pairwise interactions listed in {} '\n '{}'.format(res_2, self.input_df_path, edge_label)\n )\n\n # Ensures interactions are only added to the network once\n if G.has_edge(res_1, res_2) is False:\n G.add_edge(res_1, res_2, interaction=edge_label)\n elif G.has_edge(res_1, res_2) is True:\n attributes = [val for label, sub_dict in\n dict(G[res_1][res_2]).items() for key,\n val in sub_dict.items()]\n if not edge_label in attributes:\n G.add_edge(res_1, res_2, interaction=edge_label)\n\n return G", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def make_euler_circuit(start_node, updated_graph_instance):\n\n current_edges_on_graph_list = make_edges_list(updated_graph_instance.edges_dict)\n\n current_node = start_node\n\n node_visit_order = [current_node]\n edge_visit_order = []\n\n # print(\"\\n\\n\\ncurrent_edges_on_graph_list:\", current_edges_on_graph_list)\n\n while len(current_edges_on_graph_list) > 
0:\n\n # print(\"current_edges_on_graph_list:\", current_edges_on_graph_list)\n # while there are still edges on the graph, keep traversing\n\n current_bridges_on_graph = get_bridges(current_edges_on_graph_list)\n\n edges_conn_to_current_node = get_all_conn_edges_remaining_in_graph(\n current_node, current_edges_on_graph_list, updated_graph_instance.nodes_dict\n )\n\n edge_to_traverse = choose_edge_to_traverse(\n current_node, edges_conn_to_current_node, current_bridges_on_graph\n )\n\n if edge_to_traverse in current_edges_on_graph_list:\n\n current_edges_on_graph_list.remove(edge_to_traverse)\n\n else:\n\n current_edges_on_graph_list.remove(edge_to_traverse[::-1])\n\n edge_to_traverse_list = list(edge_to_traverse)\n # remove current node from edge to traverse\n edge_to_traverse_list.remove(current_node)\n # update current node to be the only node left in the edge list\n\n # update edge traveral list with edge just traversed\n edge_traversed = (current_node, edge_to_traverse_list[0])\n\n edge_visit_order.append(edge_traversed)\n\n current_node = edge_to_traverse_list[0]\n\n # add the new current node to the nodes visit order list\n node_visit_order.append(current_node)\n\n # add node visit order and edge_visit order to graph instance\n\n updated_graph_instance.node_visit_order = node_visit_order\n\n updated_graph_instance.edge_visit_order = edge_visit_order\n\n updated_graph_instance.node_geojson = make_node_geojson(updated_graph_instance)\n\n updated_graph_instance.edge_geojson = make_edge_geojson(updated_graph_instance)\n\n updated_graph_instance.route_geojson = make_route_geojson(updated_graph_instance)\n\n print(\"\\n\\n\\n\\n\\nROUTE COLLECTION\", updated_graph_instance.route_geojson)\n\n print(\"check done\")\n\n return updated_graph_instance", "def start_peers(self):\n for i in self.nodes:\n i.start()", "def make_connected(self):\r\n if nx.is_connected(self.g): return\r\n import random\r\n cc = list( nx.connected_components(self.g) )\r\n nodes = [random.sample(cluster,1)[0] for cluster in cc]\r\n for n1,n2 in zip(nodes[:-1],nodes[1:]):\r\n self.factors.append(factor(var=[n1,n2],card=self.cardVec[[n1,n2]],val=scipy.ones(4)))\r\n self.update()", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. 
As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def render_peers(self):\n return [peer.jsonify() for peer in self.peers]", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def graph(self):\n graph = nx.DiGraph()\n for name, joint in self.joints.items():\n graph.add_edge(*joint.connects, joint=name)\n return graph", "def make_conn_graph(interaction_logs):\n G = pgv.AGraph(directed=True)\n\n for module_id in interaction_logs['module_id'].unique():\n G.add_node(module_id, label='module')\n\n grouped = interaction_logs.groupby('user_id')\n for user_id, group in grouped:\n G.add_node(user_id, label='student')\n for module_id in set(group['module_id'].values):\n G.add_edge(user_id, module_id)\n\n return G", "def polyConnectComponents(*args, adjustEdgeFlow: Union[float, bool]=0.0, caching: bool=True,\n constructionHistory: bool=True, insertWithEdgeFlow: bool=True, name:\n AnyStr=\"\", nodeState: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def create_basic_cyclic_adjacency_map():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\"],\n \"D\": [\"E\"],\n \"E\": [\"C\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def test_peers_get(self):\n pass", "def generate_connection_i(self,N_e):\n raise NotImplementedError", "def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n 
edges.append({neighbour, vertex})\n return edges", "def add_edges(self):\n for u in self.G.nodes():\n for v in self.G.nodes():\n if u != v and u != \"Sink\" and v != \"Source\":\n self.G.add_edge(\n u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n )", "def _make_connection(user=None, friend=None, update=False, updated=[]):\n created = 0\n updated_count = 0\n\n if not update:\n # if we have other connections here:\n conns_inst_to = Degree.objects.filter(to_user=user)\n conns_inst_from = Degree.objects.filter(from_user=user)\n conns_frnd_to = Degree.objects.filter(to_user=friend)\n conns_frnd_from = Degree.objects.filter(from_user=friend)\n #caching results\n pickle_str = cPickle.dumps(conns_inst_to)\n qs1 = cPickle.loads(pickle_str)\n pickle_str = cPickle.dumps(conns_inst_from)\n qs2 = cPickle.loads(pickle_str)\n pickle_str = cPickle.dumps(conns_frnd_to)\n qs3 = cPickle.loads(pickle_str)\n pickle_str = cPickle.dumps(conns_frnd_from)\n qs4 = cPickle.loads(pickle_str)\n # original connection between 2 nodes\n conn, created_conn = Degree.objects.get_or_create(from_user=user, to_user=friend)\n conn.path = \"%s,%s\" % (user.id, friend.id)\n if not created_conn:\n conn.distance = 0\n update = True\n if {'user':user,'friend':friend} not in updated:\n updated.append({'user':user,'friend':friend})\n # 2 way connection\n reverse_conn, created_reverse_conn = Degree.objects.get_or_create(from_user=friend, to_user=user)\n reverse_conn.path = \"%s,%s\" % (friend.id, user.id)\n if not created_reverse_conn:\n reverse_conn.distance = 0\n update = True\n if {'user':user,'friend':friend} not in updated:\n updated.append({'user':user,'friend':friend})\n\n # saving\n conn.save()\n reverse_conn.save()\n\n\n if update:\n while updated:\n \"\"\"\n OPTIMIZE:\n maybe we could check only half of neighbours\n since we adding reverse connections to updated list\n this is not necessary\n logic here:\n #check all neighbours\n #new shortest path ?\n #yes->update check again\n \"\"\"\n #wile looop\n #import pdb;pdb.set_trace()\n updating = updated.pop()\n user = updating.get('user')\n friend = updating.get('friend')\n current = Degree.objects.get(from_user=user, to_user=friend)\n current_rev = Degree.objects.get(from_user=friend, to_user=user)\n\n neigh_inst_from = Degree.objects.filter(from_user=user, distance=0)\n # find all connections from my neighbours to newly friend\n # if distance > 1, fix\n if neigh_inst_from.count() > 0:\n for neigh in neigh_inst_from:\n try:\n conn_neigh = Degree.objects.get(to_user=friend, from_user=neigh.to_user)\n # if distance greater than current + 1\n if conn_neigh.distance > current.distance + 1:\n conn_neigh.distance = current.distance + 1\n # path should be traversed through user\n #conn_neigh.path = \"%s,%s,%s\" % (neigh.to_user.id, user.id, friend.id)\n conn_neigh.path = \"%s,%s\" % (neigh.to_user.id, current.path)\n conn_neigh.save()\n # adding to queue current pair\n if {'user':neigh.to_user,'friend':friend} not in updated:\n updated.append({'user':neigh.to_user,'friend':friend})\n updated_count += 1\n except Degree.DoesNotExist:\n continue\n # since there can be new connection\n\n neigh_inst_to = Degree.objects.filter(to_user=user, distance=0)\n if neigh_inst_to.count() > 0:\n for neigh in neigh_inst_to:\n try:\n conn_neigh = Degree.objects.get(to_user=neigh.from_user, from_user=friend)\n if conn_neigh.distance > current_rev.distance + 1:\n conn_neigh.distance = current_rev.distance + 1\n #conn_neigh.path = \"%s,%s,%s\" % (friend.id, user.id, neigh.from_user.id)\n 
conn_neigh.path = \"%s,%s\" % (current_rev.path, neigh.from_user.id)\n conn_neigh.save()\n if {'user':friend,'friend':neigh.from_user} not in updated:\n updated.append({'user':friend,'friend':neigh.from_user})\n updated_count += 1\n except Degree.DoesNotExist:\n continue\n\n # This will fire on reverse connections\n\n neigh_friend_from = Degree.objects.filter(from_user=friend, distance=0)\n if neigh_friend_from.count() > 0:\n for neigh in neigh_friend_from:\n try:\n conn_neigh = Degree.objects.get(to_user=user, from_user=neigh.to_user)\n if conn_neigh.distance > current_rev.distance + 1:\n conn_neigh.distance = current_rev.distance + 1\n #conn_neigh.path = \"%s,%s,%s\" % (neigh.to_user.id, friend.id, user.id)\n conn_neigh.path = \"%s,%s\" % (neigh.to_user.id, current_rev.path)\n conn_neigh.save()\n if {'user':neigh.to_user,'friend':user} not in updated:\n updated.append({'user':neigh.to_user,'friend':user})\n updated_count += 1\n except Degree.DoesNotExist:\n continue\n\n neigh_friend_to = Degree.objects.filter(to_user=friend, distance=0)\n if neigh_friend_to.count() > 0:\n for neigh in neigh_friend_to:\n try:\n conn_neigh = Degree.objects.get(to_user=neigh.from_user, from_user=user)\n if conn_neigh.distance > current.distance + 1:\n conn_neigh.distance = current.distance + 1\n #conn_neigh.path = \"%s,%s,%s\" % (user.id, friend.id, neigh.from_user.id)\n conn_neigh.path = \"%s,%s\" % (current.path, neigh.from_user.id)\n conn_neigh.save()\n if {'user':user,'friend':neigh.from_user} not in updated:\n updated.append({'user':user,'friend':neigh.from_user})\n updated_count += 1\n except Degree.DoesNotExist:\n continue\n\n logger = logging.getLogger(__name__)\n logger.warning('We updated: %s records, total: %s users' % (updated_count, UserProfile.objects.count()))\n\n\n #!!! 
This implementation will not work on ALL dependants,\n # since not all dependats have paths traversing through current nodes\n\n\n # find all dependent nodes\n # dependent means current nodes, should be in their paths\n # since we have problems with sqlite, we will use where\n \"\"\"\n dependants = Degree.objects.extra(where=[\"path like '%%\"+str(friend.id)+\"%%\"+str(user.id)+\"%%'\"])\n dependants_reverse = Degree.objects.extra(where=[\"path like '%%\"+str(user.id)+\"%%\"+str(friend.id)+\"%%'\"])\n dependant = dependants.count() + dependants_reverse.count()\n if dependant:\n for dep in dependants:\n dep.path = re.sub(r\"(%s)(?:[,0-9])+(%s)\" % (friend.id,user.id) ,r\"\\1,\\2\", dep.path)\n dep.distance = len(dep.path.split(',')) - 2\n dep.save()\n for dep in dependants_reverse:\n dep.path = re.sub(r\"(%s)(?:[,0-9])+(%s)\" % (user.id,friend.id) ,r\"\\1,\\2\", dep.path)\n dep.distance = len(dep.path.split(',')) - 2\n dep.save()\n\n logger = logging.getLogger(__name__)\n logger.warning('We updated: %s records' % (dependant))\n \"\"\"\n else:\n qs1_count = qs1.count()\n qs2_count = qs2.count()\n qs3_count = qs3.count()\n qs4_count = qs4.count()\n # We only creating connections for current user\n # but we also need connections for all neighbours\n if qs1_count > 0:\n created += qs1_count\n if qs4_count > 0:\n created += (qs1_count) * (qs4_count - 1)\n # we need to create passive connection for every connected user\n for cn in qs1:\n Degree.objects.get_or_create(from_user=cn.from_user,\\\n to_user=friend,\\\n path=\"%s,%s\" % (cn.path, friend.id),\\\n distance = cn.distance + 1)\n # we also need conn's for every neighbour of this user\n for cnn in qs4:\n Degree.objects.get_or_create(from_user=cn.from_user,\\\n to_user=cnn.to_user,\\\n path=\"%s,%s\" % (cn.path, cnn.path),\\\n distance = cn.distance + 1 + cnn.distance + 1)\n\n # and reverse\n if qs2_count > 0:\n created += qs2_count\n if qs3_count > 0:\n created += (qs2_count) * (qs3_count - 1)\n # we need to create passive connection for every connected user\n for cn in qs2:\n Degree.objects.get_or_create(from_user=friend,\\\n to_user=cn.to_user,\\\n path=\"%s,%s\" % (friend.id, cn.path),\\\n distance = cn.distance + 1)\n for cnn in qs3:\n Degree.objects.get_or_create(from_user=cnn.from_user,\\\n to_user=cn.to_user,\\\n path=\"%s,%s\" % (cnn.path, cn.path),\\\n distance = cnn.distance + 1 + cn.distance + 1)\n\n # if we have other connections here:\n if qs3_count > 0:\n created += qs3_count\n if qs2_count > 0:\n created += (qs3_count) * (qs2_count - 1)\n # we need to create passive connection for every connected user\n for cn in qs3:\n Degree.objects.get_or_create(from_user=cn.from_user,\\\n to_user=user,\\\n path=\"%s,%s\" % (cn.path, user.id),\\\n distance = cn.distance + 1)\n for cnn in qs2:\n Degree.objects.get_or_create(from_user=cn.from_user,\\\n to_user=cnn.to_user,\\\n path=\"%s,%s\" % (cn.path, cnn.path),\\\n distance = cn.distance + 1 + cnn.distance + 1)\n # we also want to make reverse connection here,\n # since we dont know who is friended by whoom\n if qs4_count > 0:\n created += qs4_count\n if qs1_count > 0:\n created += (qs4_count) * (qs1_count - 1)\n for cn in qs4:\n Degree.objects.get_or_create(from_user=user,\\\n to_user=cn.to_user,\\\n path=\"%s,%s\" % (user.id, cn.path),\\\n distance = cn.distance + 1)\n for cnn in qs1:\n Degree.objects.get_or_create(from_user=cnn.from_user,\\\n to_user=cn.to_user,\\\n path=\"%s,%s\" % (cnn.path, cn.path),\\\n distance = cnn.distance + 1 + cn.distance + 1)\n # saving later, so we wont able 
to see new connection above\n # this is not necessary since we caching results above\n # saving above\n\n logger = logging.getLogger(__name__)\n logger.warning('We created: %s new connections, total: %s users' % (created, UserProfile.objects.count()))\n\n return True", "def gen_graph(self, seed=None):\n block = make_residue_graph(self.molecule, attrs=('resid', 'resname'))\n resnames = nx.get_node_attributes(block, 'resname')\n graph = nx.Graph()\n graph.add_nodes_from(block.nodes)\n graph.add_edges_from(block.edges)\n nx.set_node_attributes(graph, resnames, \"resname\")\n return graph", "def _get_connections(self) -> _ConnectionsMap:\n seen: Dict[int, Any] = {}\n for parent in self.target.ancestors:\n if not isinstance(parent, NodeInstance):\n continue\n if parent is self.target.root:\n break\n if self.operation_host:\n self._get_connection(self.operation_host, parent, seen)\n self._get_connection(self.target.root, parent, seen)\n # get the rest of the default connections\n self._get_connection(self.target.root, None, seen)\n\n # reverse so nearest relationships replace less specific ones that have matching names\n connections = _ConnectionsMap( # the list() is for Python 3.7\n (rel.name, rel) for rel in reversed(list(seen.values()))\n )\n return connections", "def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):\n if nbunch is None:\n nbunch = G\n else:\n nbunch = set(nbunch)\n\n directed = G.is_directed()\n if directed:\n iter_func = itertools.permutations\n else:\n iter_func = itertools.combinations\n\n all_pairs = {n: {} for n in nbunch}\n\n for u, v in iter_func(nbunch, 2):\n k = local_node_connectivity(G, u, v, cutoff=cutoff)\n all_pairs[u][v] = k\n if not directed:\n all_pairs[v][u] = k\n\n return all_pairs", "def nodes(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].nodes.values()])", "def E_cycle(adj_dict, num_edges, num_lines):\n\n #current_node = 0 #arbitrarily choose node 0 to start\n\n #set current_node as key in adj_dict when using strings instead of node numbers\n keys = adj_dict.keys()\n current_node = keys[0]\n\n path = []\n seen_edges = 0\n seen_and_extra_edges = [] #for backtracking\n\n while seen_edges != num_edges:\n if len(adj_dict[current_node]) != 0:\n #if there is another outgoing edge\n path.append(current_node)\n next_node = adj_dict[current_node][0] #get the next unseen edge\n adj_dict[current_node].remove(next_node)\n #remove edge so that it won't be visited twice\n if len(adj_dict[current_node]) !=0:\n #if there is another outgoing edge, add it to backtracking list\n seen_and_extra_edges.append(current_node)\n seen_edges +=1\n current_node = next_node\n else:\n #made a bad choice, need to start a new sub-cycle\n #print(seen_and_extra_edges)\n #print(path)\n current_node = seen_and_extra_edges[0]\n seen_and_extra_edges.remove(current_node)\n\n #put the previous sub-cycle into the path\n temp_path = []\n new_start = path.index(current_node)\n temp_path = path[new_start:] #from the restart node to the end\n temp_path += path[:new_start] #from the beginning to the restart node\n path = temp_path\n\n #append the last elt\n source = path[0]\n path+=[source]\n return path", "def getGraphWhichCouldBeWorsenedBySwitch(self):\n makeLayers = self.makeLayers\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n setInLayerOrderConstraint = self.setInLayerOrderConstraint\n\n layers = makeLayers(3)\n leftNodes = addNodesToLayer(2, layers[0])\n middleNodes = 
addNodesToLayer(2, layers[1])\n rightNodes = addNodesToLayer(2, layers[2])\n\n setInLayerOrderConstraint(leftNodes[0], leftNodes[1])\n setInLayerOrderConstraint(rightNodes[0], rightNodes[1])\n\n eastWestEdgeFromTo(middleNodes[0], rightNodes[1])\n eastWestEdgeFromTo(middleNodes[1], rightNodes[0])\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n eastWestEdgeFromTo(leftNodes[1], middleNodes[1])\n eastWestEdgeFromTo(leftNodes[1], middleNodes[1])\n\n return self.graph", "def createGraph(self):\n \n for episode in self.episodes:\n listeSuccessors = [episode[episode[:,1] > episode[i,1]][:,0] # List of list of successors for each user\n for i in range(len(episode))] \n for i, successeur in enumerate(listeSuccessors): # for the list of successors of each user\n for v in successeur: # for every successor of a user\n u, proba = episode[i,0], np.random.random() # Generate a probability so within (0,1)\n self.successors[u][v] = proba # u ---(proba)---> v \n self.predecessors[v][u] = proba # v ---(proba)---> u", "def get_graph_karateclub():\n all_members = set(range(34))\n club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}\n # club2 = all_members - club1\n\n G = eg.Graph(name=\"Zachary's Karate Club\")\n for node in all_members:\n G.add_node(node+1)\n\n zacharydat = \"\"\"\\\n0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0\n1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0\n1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0\n1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1\n0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 
0 0 1 1\n0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1\n0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0\"\"\"\n\n for row, line in enumerate(zacharydat.split('\\n')):\n thisrow = [int(b) for b in line.split()]\n for col, entry in enumerate(thisrow):\n if entry == 1:\n G.add_edge(row+1, col+1)\n\n # Add the name of each member's club as a node attribute.\n for v in G:\n G.nodes[v]['club'] = 'Mr. Hi' if v in club1 else 'Officer'\n return G", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def connexify(self, estimator, nb_connect=5, verbose=False):\n connex_groups_id = list(self.graph.connex_groups)\n connex_pairs = permutations(connex_groups_id, 2)\n new_edges = []\n for conidx1, conidx2 in connex_pairs:\n for _ in range(nb_connect):\n node_idx1 = random.choice(self.graph.connex_groups[conidx1])\n node_idx2 = random.choice(self.graph.connex_groups[conidx2])\n state1 = self.graph.nodes[node_idx1]\n state2 = self.graph.nodes[node_idx2]\n success, X_opt, U_opt, V_opt = self.opt_trajectories(\n (state1, state2), estimator,\n verbose=verbose)\n if success:\n new_edges.append(((node_idx1, node_idx2),\n X_opt, U_opt, V_opt))\n\n for edge in new_edges:\n self.graph.add_edge(*edge)", "def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]", "def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def get_bubble_chain(self, same_color_chain_list):\n\n\t\tif self.color in self.adj_dict.keys():\n\t\t\tfor bubble in self.adj_dict[self.color]:\n\t\t\t\tif bubble not in same_color_chain_list:\n\t\t\t\t\tsame_color_chain_list.append(bubble) \n\t\t\t\t\tbubble.get_bubble_chain(same_color_chain_list)\n\t\treturn same_color_chain_list", "def _construct_graph(self):\n raise NotImplementedError", "def send_peers_info(conn, session, ip):\n peers = [cli.ip for cli in session.cooplist if cli.ip != ip]\n if not peers:\n peers = []\n pkt = CCPacket(CCHeader(MSG['PEERINFO']), peers)\n conn.send(pkt.packed())", "def build_graph(self, graph, 
inst_name, port_nets):\n return", "def connections(self, recurse = True):\n \n return NeuroObject.connections(self, recurse) + [self.root] + self.arborizations(False) + self.gapJunctions(False) + self.innervations(False) + self.synapses(False)", "def edges(self):\r\n return self.__generate_edges()", "def run(self, dag):\n self.property_set[\"commutation_set\"] = defaultdict(list)\n pending_1q = [list() for _ in range(dag.num_qubits())]\n block_id = [-(i + 1) for i in range(dag.num_qubits())]\n current_id = 0\n block_list = list()\n to_qid = dict()\n for i, qubit in enumerate(dag.qubits):\n to_qid[qubit] = i\n for node in dag.topological_op_nodes():\n qids = [to_qid[q] for q in node.qargs]\n if (\n not isinstance(node.op, Gate)\n or len(qids) > 2\n or node.op.condition\n or node.op.is_parameterized()\n ):\n for qid in qids:\n if block_id[qid] > 0:\n block_list[block_id[qid]].extend(pending_1q[qid])\n block_id[qid] = -(qid + 1)\n pending_1q[qid].clear()\n continue\n\n if len(qids) == 1:\n b_id = block_id[qids[0]]\n if b_id < 0:\n pending_1q[qids[0]].append(node)\n else:\n block_list[b_id].append(node)\n elif block_id[qids[0]] == block_id[qids[1]]:\n block_list[block_id[qids[0]]].append(node)\n else:\n block_id[qids[0]] = current_id\n block_id[qids[1]] = current_id\n new_block = list()\n if pending_1q[qids[0]]:\n new_block.extend(pending_1q[qids[0]])\n pending_1q[qids[0]].clear()\n if pending_1q[qids[1]]:\n new_block.extend(pending_1q[qids[1]])\n pending_1q[qids[1]].clear()\n new_block.append(node)\n block_list.append(new_block)\n current_id += 1\n\n self.property_set[\"block_list\"] = [tuple(block) for block in block_list]\n return dag", "def multi_edge():\n from networkx.readwrite import json_graph\n import networkx as nx\n import autonetkit\n # returns a house graph\n data = {'directed': False,\n 'graph': [],\n 'links': [{'_ports': {'r4': 2, 'r5': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 1},\n {'_ports': {'r2': 3, 'r4': 1},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r2': 4, 'r4': 3},\n 'raw_interfaces': {},\n 'source': 0,\n 'target': 3},\n {'_ports': {'r3': 3, 'r5': 2},\n 'raw_interfaces': {},\n 'source': 1,\n 'target': 4},\n {'_ports': {'r1': 1, 'r2': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 3, 'r2': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 3},\n {'_ports': {'r1': 2, 'r3': 1},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 4, 'r3': 4},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r1': 5, 'r3': 5},\n 'raw_interfaces': {},\n 'source': 2,\n 'target': 4},\n {'_ports': {'r2': 2, 'r3': 2},\n 'raw_interfaces': {},\n 'source': 3,\n 'target': 4}],\n 'multigraph': True,\n 'nodes': [{'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r4 to r5', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r4 to r2', 'id': 'eth2'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r4',\n 'label': 'r4',\n 'x': 675,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r5 to r4', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r5 to r3', 'id': 'eth1'}},\n 'asn': 2,\n 'device_type': 'router',\n 'id': 'r5',\n 'label': 'r5',\n 'x': 675,\n 'y': 500},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 
'r1 to r2', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r1 to r2', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r1 to r3', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r1',\n 'label': 'r1',\n 'x': 350,\n 'y': 400},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r2 to r3', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r2 to r4', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r2 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r2',\n 'label': 'r2',\n 'x': 500,\n 'y': 300},\n {'_ports': {0: {'category': 'physical', 'description': None},\n 1: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth0'},\n 2: {'category': 'physical', 'description': 'r3 to r2', 'id': 'eth1'},\n 3: {'category': 'physical', 'description': 'r3 to r5', 'id': 'eth2'},\n 4: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth3'},\n 5: {'category': 'physical', 'description': 'r3 to r1', 'id': 'eth4'}},\n 'asn': 1,\n 'device_type': 'router',\n 'id': 'r3',\n 'label': 'r3',\n 'x': 500,\n 'y': 500}]}\n graph = json_graph.node_link_graph(data)\n anm = autonetkit.anm.NetworkModel()\n g_in = anm.add_overlay(\"input\")\n g_in._replace_graph(nx.MultiGraph(graph))\n # TODO: check if should build overlays here rather than clone in?\n g_phy = anm[\"phy\"]\n g_phy._replace_graph(graph)\n return anm", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def connect_peers(self):\n assert self.targets\n # connect closest node to target id\n for t in self.targets:\n if len(self.connections) >= self.max_peers:\n break\n for knode in self.proto.routing.neighbours(devp2p.kademlia.Node.from_id(t['address'])):\n assert isinstance(knode, devp2p.kademlia.Node)\n # assure within tolerance\n if knode.id_distance(t['address']) < t['tolerance']:\n # make sure we are not connected yet\n remote = self.network[knode.id]\n if remote not in self.connections:\n if remote.receive_connect(self):\n t['connected'] = True\n self.connections.append(remote)\n break", "def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n link_list2.append({vertex, neighbour})\n else:\n # 
plants are only put in the dict. if they occur more\n # than once, i.e. both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)", "def __init__(self, seed, size):\n self.seed = seed\n self.size = size\n\n self.name = str(time())\n # The attribute 'generation_count' will be incremented automatically by the game Gadakeco.\n self.generation_count = 1\n\n self.current_generation = []\n for i in range(size):\n new = Network()\n mutated = new.edge_mutation()\n self.current_generation.append(mutated)", "def run(self, dag):\n # Initiate the commutation set\n self.property_set['commutation_set'] = defaultdict(list)\n\n # Build a dictionary to keep track of the gates on each qubit\n # The key with format (wire_name) will store the lists of commutation sets\n # The key with format (node, wire_name) will store the index of the commutation set\n # on the wire with wire_name, thus, for example:\n # self.property_set['commutation_set'][wire_name][(node, wire_name)] will give the\n # commutation set that contains node.\n\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n self.property_set['commutation_set'][wire_name] = []\n\n # Add edges to the dictionary for each qubit\n for node in dag.topological_op_nodes():\n for (_, _, edge_data) in dag.edges(node):\n\n edge_name = edge_data['name']\n self.property_set['commutation_set'][(node, edge_name)] = -1\n\n # Construct the commutation set\n for wire in dag.wires:\n wire_name = \"{0}[{1}]\".format(str(wire.register.name), str(wire.index))\n\n for current_gate in dag.nodes_on_wire(wire):\n\n current_comm_set = self.property_set['commutation_set'][wire_name]\n if not current_comm_set:\n current_comm_set.append([current_gate])\n\n if current_gate not in current_comm_set[-1]:\n prev_gate = current_comm_set[-1][-1]\n does_commute = False\n try:\n does_commute = _commute(current_gate, prev_gate, self.cache)\n except TranspilerError:\n pass\n if does_commute:\n current_comm_set[-1].append(current_gate)\n\n else:\n current_comm_set.append([current_gate])\n\n temp_len = len(current_comm_set)\n self.property_set['commutation_set'][(current_gate, wire_name)] = temp_len - 1", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in 
proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G", "def _showConnectionGraph(self):\n self._console_output(\"Creating connect graph...\")\n res = True\n\n u = InfoUI.function_orig_ea\n v = InfoUI.function_dest_ea\n\n cg = self.ba.get_connect_graph(u, v)\n res = self.ba.show_connect_graph(cg)\n\n if not res:\n self._console_output(\n \"[x] No connection between %08x and %08x\" % (u, v),\n err = True)", "def peers(self):\n\n peers_data = ''\n for peer in getattr(self._peer, 'peers', []):\n peers_data += peer.config().remote_config\n return peers_data", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def set_connection_between_nodes(self):\n\n for i, node in enumerate(self.list_empty_nodes):\n line = node.labyrinth_position[0]\n column = node.labyrinth_position[1]\n\n for j in range(i+1, len(self.list_empty_nodes)):\n line_j = self.list_empty_nodes[j].labyrinth_position[0]\n column_j = self.list_empty_nodes[j].labyrinth_position[1]\n \n if i != j and ((line == line_j and column == column_j - 1) \\\n or (line == line_j and column == column_j + 1) \\\n or (column == column_j and line == line_j - 1) \\\n or (column == column_j and line == line_j + 1)) \\\n and (not node in self.list_empty_nodes[j].connected_to) \\\n and (not self.list_empty_nodes[j] in node.connected_to):\n node.connected_to.append(self.list_empty_nodes[j])\n self.list_empty_nodes[j].connected_to.append(node)", "def getAllEdges(mutations):\n\tallEdges = []\n\tfor mutation in mutations: \n\t\tprint mutation\n\t\tfor mutation2 in mutations:\n\t\t\tif not (mutation == mutation2): # No edges connecting to themselves.\n\t\t\t\ttmp = []\n\t\t\t\ttmp.append(mutation)\n\t\t\t\ttmp.append(mutation2)\n\t\t\t\tallEdges.append(tmp)\n\treturn allEdges", "def edges(self):\n return convert_chains_to_edges(self.chains)", "def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected" ]
[ "0.62271756", "0.61608636", "0.5960585", "0.58688587", "0.5849908", "0.5812597", "0.57896066", "0.57523227", "0.5716475", "0.57021713", "0.56578314", "0.56416863", "0.56310755", "0.56212884", "0.56085145", "0.5589877", "0.5569081", "0.5566458", "0.55554277", "0.5497438", "0.5494903", "0.5490833", "0.545125", "0.5423957", "0.5420847", "0.5384469", "0.53614235", "0.53487724", "0.5347462", "0.53430057", "0.53373224", "0.53373224", "0.53306484", "0.5327709", "0.53143597", "0.5295658", "0.5295644", "0.5287705", "0.52799916", "0.5279579", "0.5277738", "0.5277451", "0.5276692", "0.52762586", "0.5261141", "0.525585", "0.52546614", "0.52452564", "0.524394", "0.52439094", "0.5241672", "0.5240419", "0.52387094", "0.52224857", "0.5221947", "0.5221006", "0.52184325", "0.5212046", "0.5197978", "0.5196656", "0.5180728", "0.5174582", "0.5171687", "0.5169697", "0.516806", "0.51503384", "0.5148816", "0.5138137", "0.51259947", "0.5122554", "0.5116737", "0.51160413", "0.51153344", "0.5113536", "0.51133287", "0.5104374", "0.5103973", "0.51016384", "0.5101275", "0.5099172", "0.5094227", "0.5088732", "0.50869423", "0.50779176", "0.50735396", "0.5072475", "0.5069319", "0.505851", "0.5057866", "0.50524956", "0.50507224", "0.50473046", "0.5040676", "0.5038779", "0.50375736", "0.5035563", "0.50215393", "0.5021312", "0.50194", "0.50152594" ]
0.6303654
0
Start thread for each peer
def start_peers(self):
    for i in self.nodes:
        i.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peer_server(self):\n try:\n listener_thread = threading.Thread(target=self.peer_server_listener)\n listener_thread.setDaemon(True)\n\n operations_thread = threading.Thread(target=self.peer_server_host)\n operations_thread.setDaemon(True)\n\n listener_thread.start()\n operations_thread.start()\n\n threads = []\n threads.append(listener_thread)\n threads.append(operations_thread)\n\n for t in threads:\n t.join()\n except Exception as e:\n print \"Peer Server Error, %s\" % e\n sys.exit(1)", "def start_workers(self):\n\n for thread in self.threads:\n thread.start()", "def start(self):\r\n for srv in self._servers:\r\n srv.start()", "def run(self):\n if self.name == \"PeerServer\":\n self.peer_server()\n elif self.name == \"PeerFileHandler\":\n self.peer_file_handler()", "def start(self):\n waiting_for_clients = Thread(target=self.accept_client)\n waiting_for_clients.start()", "def startWorkers(self):\n for i in range(self.aOT):\n t = thr.Thread(target=self.threadWorker)\n t.start()\n self.threads.append(t)", "def peer_server_host(self):\n try:\n while True:\n while not self.peer_server_listener_queue.empty():\n with futures.ThreadPoolExecutor(max_workers=8) as executor:\n conn, addr = self.peer_server_listener_queue.get()\n data_received = json.loads(conn.recv(1024))\n\n if data_received['command'] == 'obtain_active':\n fut = executor.submit(\n self.peer_server_upload, conn, data_received)\n except Exception as e:\n print \"Peer Server Hosting Error, %s\" % e", "def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)", "def execute(self):\n with self.app.peers_lock:\n for peer in self.app.peers.values():\n\n # Skip peers that have no chance at connecting.\n if peer.host is None:\n logger.log(TRACE, \"%s will not be connected as it \"\n \"doens't have a host set\", peer)\n continue\n\n if peer.state_connecting:\n self.connecting_peer(peer)\n elif peer.state_initial:\n self.connect_peer(peer)\n elif peer.state_no_connection:\n self.reconnect_peer(peer)", "def listen(self):\n while True:\n peer, address = self.server.accept()\n peer_port = int(peer.recv(self.CHUNK_SIZE).decode(self.FORMAT))\n peer.send(bytes(str(self.PEER_LIST),self.FORMAT))\n if (address[0], peer_port) not in self.PEER_LIST:\n self.PEER_LIST.append((address[0], peer_port))\n output_message = f\"{datetime.now().timestamp()} : Added <{address[0]}:{peer_port}> to the Peer_list\"\n self.dump_to_file(output_message)\n print(output_message)\n thread = threading.Thread(target=self.handlePeers, args=(peer, ))\n thread.start()", "def connectToSeeds(sock, peerQ):\n for addr, port in iter(peerQ.get, \"STOP\"):\n with lockSocketReq:\n log.debug(f\"Connecting to seed {addr}:{port}\",\"Connect to Seeds\")\n sock.connect(f\"tcp://{addr}:{port}\")\n counterSocketReq.release()\n log.info(f\"Dispatcher connected to seed with address:{addr}:{port})\", \"Connect to Seeds\")", "def setup_worker_threads(self):\n \n for thread_number in range(0, self.max_workers):\n worker = DeviceWorker(self, thread_number)\n self.worker_threads.append(worker)\n worker.start()", "def listener(localSearchForFile):\n for con in listenForConnection(GLOBALS.LOCAL_PORT):\n if con == -1: return -1\n\n if GLOBALS.DEBUG_MODE: print(\"[info] Linear lib: received connection\")\n\n # spawn connection handler thread\n superPeer = 
Thread(target = superPeerConnection, args=[con, localSearchForFile])\n superPeer.daemon = True\n superPeer.start()", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def initiate_threads(self):\n receive_audio_thread = threading.Thread(target=self.receive_audio)\n receive_audio_thread.start()\n send_audio_thread = threading.Thread(target=self.send_audio)\n send_audio_thread.start()\n receive_video_thread = threading.Thread(target=self.receive_video)\n receive_video_thread.start()\n send_video_thread = threading.Thread(target=self.send_video)\n send_video_thread.start()", "def start(self):\n if self.is_alive:\n self.logger.warning('Already started!')\n return\n self._create_tunnels()\n if not self.is_active:\n self._raise(BaseSSHTunnelForwarderError,\n reason='Could not establish session to SSH gateway')\n for _srv in self._server_list:\n thread = threading.Thread(\n target=self._serve_forever_wrapper,\n args=(_srv, ),\n name='Srv-{0}'.format(address_to_str(_srv.local_port))\n )\n thread.daemon = self.daemon_forward_servers\n thread.start()\n self._check_tunnel(_srv)\n self.is_alive = any(self.tunnel_is_up.values())\n if not self.is_alive:\n self._raise(HandlerSSHTunnelForwarderError,\n 'An error occurred while opening tunnels.')", "def start(self):\n\n self.__new_bus_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__bus_stations_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__ipv4 = (socket.gethostbyname(socket.gethostname()))\n if self.__telegram_bot == None:\n print(\"telegram bot connection is not set yet\")\n return\n if self.__message_sender == None:\n print(\"message sender connection is not set yet\")\n return\n new_bus_receiver = threading.Thread(target=self.__new_bus_reciever, args=(), name=\"new_bus_reciever\")\n new_bus_receiver.start()\n updates_tracker = threading.Thread(target=self.__track_updates, args=(), name=\"updates_tracker\")\n updates_tracker.start()\n heart_beat = threading.Thread(target=self.__heart, args=(), name=\"Heart beats\")\n heart_beat.start()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def startMP(self):\n for w in self.consumers:\n w.start()", "def create_and_start_threads(self):\r\n self.create_threads()\r\n self.start_threads()", "def start(self):\n logger.debug(\"Starting {0} downloaders\".format(self.num_downloaders))\n for p in self._downloaders:\n # p.daemon = True\n p.start()\n logger.debug(\"Starting {0} checkers\".format(self.num_checkers))\n for p in self._checkers:\n # p.daemon = True\n p.start()", "def start_threads(count):\n for i in range(count):\n threading.Thread(target=send_pulses, args=(i,)).start()", "def __init__(self):\r\n threading.Thread.__init__(self)\r\n self.wait_to_start = True\r\n self.id = 0\r\n self.players = None\r\n self.socket = None", "def start(self):\n\n self.col2_print('Starting Listener threads', self.listeners)\n\n # Front facing socket to accept client connections.\n self.socket_front = self.zmq_context.socket(zmq.ROUTER)\n self.socket_front.router_raw = self.router_raw\n self.socket_front.setsockopt(zmq.LINGER, 1)\n self.socket_front.bind('%s://%s:%s' % (self.str_protocol,\n self.str_IP,\n 
self.str_port)\n )\n\n # Backend socket to distribute work.\n self.socket_back = self.zmq_context.socket(zmq.DEALER)\n self.socket_back.setsockopt(zmq.LINGER, 1)\n self.socket_back.bind('inproc://backend')\n\n # Start the 'fileIO' thread\n self.fileIO = FileIO( timeout = 60,\n within = self,\n debugFile = self.str_debugFile,\n debugToFile = self.b_debugToFile)\n self.fileIO.start()\n\n # Start the 'listener' workers... keep track of each\n # listener instance so that we can selectively stop\n # them later.\n for i in range(0, self.listeners):\n self.l_listener.append(Listener(\n id = i,\n context = self.zmq_context,\n DB = self._ptree,\n DBpath = self.str_DBpath,\n http = self.b_http,\n within = self,\n listenerSleep = self.listenerSleep,\n debugToFile = self.b_debugToFile,\n debugFile = self.str_debugFile))\n self.l_listener[i].start()\n\n # Use built in queue device to distribute requests among workers.\n # What queue device does internally is,\n # 1. Read a client's socket ID and request.\n # 2. Send socket ID and request to a worker.\n # 3. Read a client's socket ID and result from a worker.\n # 4. Route result back to the client using socket ID.\n self.dp.qprint(\"*******before zmq.device!!!\")\n try:\n zmq.device(zmq.QUEUE, self.socket_front, self.socket_back)\n except:\n self.dp.qprint('Hmmm... some error was caught on shutting down the zmq.device...')\n self.dp.qprint(\"*******after zmq.device!!!\")", "def start(self):\n self.socket_manager.start()\n\n if self.poc != None:\n self._start_thread(self.contact_poc, daemon=True)\n self.send_discovery_message(self.poc)\n self._start_thread(self.watch_for_discovery_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_messages, daemon=True)\n self._start_thread(self.send_heartbeat_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_timeouts, daemon=True)\n self._start_thread(self.watch_for_rtt_messages, daemon=True)\n self._start_thread(self.calculate_rtt_timer, daemon=True)\n self._start_thread(self.watch_for_app_messages, daemon=True)\n\n while True: # Blocking. Nothing can go below this\n self.check_for_inactivity()", "def start_threads(self):\r\n assert len(self.all_threads) > 0\r\n for thread in self.all_threads:\r\n thread.start()", "def _start(self, arbiter):\n self.transport_manager.start()\n for execution_manager in self.execution_managers:\n execution_manager.start()", "def run(self):\n self.tcpsock.listen(5)\n while len(self.threadlist) < 10:\n \n print(\"Waiting for incoming connections...\")\n\n # Waits for the socket to be available, then creates a thread\n # for incoming client connections.\n inputready, outputready, exceptready = select.select([self.tcpsock],[],[])\n for i in inputready:\n if i == self.tcpsock:\n (conn, (ip,port)) = self.tcpsock.accept()\n print('Got connection from ', (ip,port))\n clientthread = ClientThread(ip, port, conn, self.serverdir, self.serverindex, self.jobqueue)\n clientthread.start()\n self.threadlist['ClientThread[{}]'.format(clientthread.threadID)] = clientthread\n time.sleep(1)", "def do_connect(self, args):\r\n for host in self.host:\r\n client = paramiko.SSHClient()\r\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n client.connect(host[0], username=host[1], password=host[2])\r\n self.connections.append(client)", "def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. 
We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1", "def start_sockets_threads():\n\n # register clean_up_threads to run on crash signals\n for sig in (SIGABRT, SIGINT, SIGTERM):\n signal(sig, ServerSockets.clean_up_threads)\n\n # spawn all listen threads\n app.socket_threads.append(spawn(ServerSockets.listen_for_users))\n app.socket_threads.append(spawn(ServerSockets.listen_for_ingredients))\n app.socket_threads.append(spawn(ServerSockets.listen_for_menu))", "def start(self):\n #Starting the thread able to handle the administrator request\n t2 = threading.Thread(target=self.handleServer)\n t2.start()\n self.ActiveThreads.append(t2)\n #Listen continously\n while True:\n self.server.listen(50)\n self.Log.log(\"Waiting for connections...\")\n #Obtaining the parameters like the socket and the address/port of the incoming connection\n (conn, (ip,port)) = self.server.accept()\n #Creating a new thread able to handle the new connection with the client\n newClient = ClientHandler(conn,ip,port,self.DB,self.Users,self.Log,self.XML);\n #Starting the new thread\n newClient.start()\n #Appending the thread to the list of active threads\n self.ActiveThreads.append(newClient)", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def start(self):\n for _id in self._workers:\n self.start_action(_id)", "def _try_peers(self, peers):\n for peer_entry in peers:\n if peer_entry['id'] == self.peer_id:\n continue\n\n print('Trying peer: {}'.format(peer_entry))\n peer = Peer(peer_entry['id'], peer_entry['ip'], peer_entry['port'], self._torrent)\n try:\n peer.connect(self.peer_id)\n except PeerConnectionError:\n continue\n else:\n self._peers.append(peer)\n peer.subscribe_for_messages_to_client(self.peer_message_receiver(peer))", "def run(self):\n for data in self.__iter_data():\n for client in self.clients:\n gevent.spawn(self.send, client, data)", "def __init__(self, threadid, name, p):\n threading.Thread.__init__(self)\n self.threadID = threadid\n self.name = name\n self.peer = p\n self.peer_server_listener_queue = Queue()", "def run(self):\n print(\"[CONNEXION_MANAGER] start connecting\")\n while True:\n self.connexion_init()", "def start(self):\n self.capturing = True\n print \"Connecting Sender\"\n self.sock.connect(self.addr)\n self.capture_thread.start()\n print \"Starting Sender\"\n self.sending_thread.start()", "def _start_servers(self):\n for user, host, port in self.server_addresses:\n remoteHost = \"%s@%s\" % (user, host)\n logger.info(\"starting remote server %s:%s\", host, port)\n command = (\"cd ~/goaway;\" +\n \"find . 
-name '*.pyc' -delete ;\" +\n \"DEBUG=true goaway/cmdserver.py %s %s %s >> server.std.log 2>&1\" % (\n host,\n port,\n self._config.remote_path,\n ))\n logger.debug(\"Starting server:%s remoteHost with command:%s\" % (remoteHost, command))\n ## subprocess.call blocks, while subprocces.Popen doesn't block.\n sshPopen = subprocess.Popen([\"ssh\", remoteHost, command],\n shell = False, stdout= subprocess.PIPE, stderr = subprocess.PIPE)\n self._start_local_server()", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def run(self):\n to_client_request_thread = threading.Thread(target=self._dispatch_to_client_request, daemon=True)\n to_client_request_thread.start()\n\n from_client_request_thread = threading.Thread(target=self._dispatch_from_client_request, daemon=True)\n from_client_request_thread.start()\n\n from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)\n from_client_commands_thread.start()\n\n to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)\n to_client_update_state_thread.start()\n\n server_control_thread = threading.Thread(target=self._server_control, daemon=True)\n server_control_thread.start()\n\n # Wait for threads to finish\n to_client_request_thread.join()\n from_client_request_thread.join()\n from_client_commands_thread.join()\n to_client_update_state_thread.join()\n server_control_thread.join()\n \n # Close server connection\n self._to_client_request.close()\n self._from_client_request.close()", "def execute(self):\n self.go_online()\n empty_tuple = ()\n thread.start_new_thread(self.get_user_input, empty_tuple)\n while not self.suspended:\n ds, addr = self.s.accept()\n client = ClientNode(self, addr, ds)\n # self.clients.append(client) # Now happens after username is verified in clientNode.py\n print 'Incoming connection from', addr\n if not self.suspended:\n thread.start_new_thread(client.execute, empty_tuple)\n print 'Server is suspended. 
Thank you!'", "def serve(self):\r\n for i in range(self.threads):\r\n try:\r\n t = threading.Thread(target = self.serveThread)\r\n t.setDaemon(self.daemon)\r\n t.start()\r\n except Exception, x:\r\n logging.exception(x)\r\n\r\n # Pump the socket for clients\r\n self.serverTransport.listen()\r\n while True:\r\n try:\r\n client = self.serverTransport.accept()\r\n self.clients.put(client)\r\n except Exception, x:\r\n logging.exception(x)", "def run(self):\n for worker in self.simulation_workers:\n worker.start()", "def init_client_seeker():\n client_seeker = threading.Thread(target=seek_for_client)\n client_seeker.daemon = True\n client_seeker.start()", "def start_all_nodes(self):\n for node in self.nodes:\n node.start()", "def run(self):\n\n # Create the ports\n self.wan_port.create_on_server(self.server)\n self.cpe_port.create_on_server(self.server)\n\n flows = []\n\n # Create all the downstream flows which are requested\n for i in range(self.number_of_downstream_flows):\n flow_name = \"Downstream_%d\" % (i + 1)\n logging.info('Creating flow \"%s\"', flow_name)\n flows.append(\n self.traffic_profile.create_between(name=flow_name,\n flow_number=i + 1,\n source=self.wan_port,\n destination=self.cpe_port,\n duration=self.traffic_duration)\n )\n\n # Create all the upstream flows which are configured\n for i in range(self.number_of_upstream_flows):\n flow_name = \"Upstream_%d\" % (i + 1)\n logging.info('Creating flow \"%s\"', flow_name)\n flows.append(\n self.traffic_profile.create_between(name=flow_name,\n flow_number=i + 1,\n source=self.cpe_port,\n destination=self.wan_port,\n duration=self.traffic_duration)\n )\n\n # Start the traffic and with until finished\n self.run_traffic(flows)\n\n # Get the results from the flow and return them in a list of dicts\n return [flow.get_results() for flow in flows]", "def connect(self):\n self.start()", "def start(self):\n self.sender.start()\n self.receiver.start()", "def start(self):\n self.sender.start()\n self.receiver.start()", "def connect(self, peer):\n peer.listen()\n time.sleep(0.1)\n client_thread = ClientThread(peer.address, self.message_queue, self.queue_lock, self.on_message_received)\n client_thread.start()\n self.connected_as_client = True # TODO only if successful", "def setup_targets(self):\n for i in range(self.min_peers):\n self.targets.append(dict(address=0, tolerance=0, connected=False))\n # NOT IMPLEMENTED HERE", "def start_streams(self, stream_list, get_result=False):\n for stream_id in stream_list:\n self.send_stream(stream_id, get_result=get_result)", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def run(self):\n # bind to the port\n self.serversocket.bind(('0.0.0.0', PORT))\n print(\"Listening on: \" + Colors.BOLD + HOST + \":\" + str(PORT) + Colors.ENDC)\n print(\"... 
waiting for a connection\", file=sys.stderr)\n try:\n while True:\n # queue up to 5 requests\n self.serversocket.listen(5)\n clientsocket, addr = self.serversocket.accept()\n print(\"Got a connection from \" + Colors.WARNING + \"%s\" % str(addr) + Colors.ENDC)\n self.clientsocket.append(clientsocket)\n newthread = ClientThread(addr, self.clientsocket[-1])\n newthread.start()\n finally:\n for cskt in self.clientsocket:\n cskt.close()", "def start(self):\n listening_thread = Thread(\n target=self.sock.start_listening, daemon=True)\n listening_thread.start()\n sending_thread = Thread(target=self.sock.start_sending, daemon=True)\n sending_thread.start()\n\n ack_watch_thread = Thread(target=self.watch_for_acks, daemon=True)\n ack_watch_thread.start()\n\n ack_timeout_thread = Thread(\n target=self.watch_for_ack_timeout, daemon=True)\n ack_timeout_thread.start()\n\n self.report()", "def start(self):\n for trial in self._trials:\n self._run(trial)", "def prepare(self):\r\n self.socket.listen()\r\n for _ in xrange(self.threads):\r\n thread = Worker(self.tasks)\r\n thread.setDaemon(True)\r\n thread.start()\r\n self.prepared = True", "def run(self):\n self.connect()\n self.run_forever()", "def next(p):\n threading.Thread(target=forward, args=(p,)).start()\n threading.Thread(target=insertData, args=(p,)).start()\n threading.Thread(target=clientListen, args=(p,)).start()\n sys.exit()", "def run(i_cmd, i_ServerList, senario): #get servers name to run\n threads = []\n serverAddrList = getServersAddrs(i_ServerList)\n for server in serverAddrList:\n t = threading.Thread(target=doCMD, args=(i_cmd, server, senario,))\n threads.append(t)\n t.start()", "def run(self):\n logging.info(\"task manager started!\")\n t = TCPManager()\n s = t.set_cmd_connect()\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=self.manage_task, args=(sock, addr))\n t.start()", "def start(self, ids):\n self._request('torrent-start', {}, ids, True)", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def __init__(self) -> None:\n self._transport_list = []\n self._lock = threading.Lock()\n\n self._terminate_lock = threading.Lock()\n self._terminate = False\n\n self._handle_thread = threading.Thread(\n target=self.check_transports, args=(), name=\"Transport_Manager\")\n self._handle_thread.start()", "def ssh_parallel(self, *items, **kwargs):\n threads = []\n # generate and start all threads\n for node, spec in self.instances.items():\n t = threading.Thread(target=self._ssh_parallel_target, args=(node, kwargs.get(\"silent\", True), items))\n threads.append(t)\n t.start()\n # wait for all threads to complete\n for t in threads:\n t.join()", "def Broadcast(self, method, *args, **kwargs):\n for peer_id, (host, port, peer) in self.peers.iteritems():\n logging.debug('Calling method %r on peer %r.' 
% (method, peer_id))\n m = getattr(peer, method)\n m(self.peer_id, *args, **kwargs)", "def connect(self, ip):\n # Connect to the central server and tell it how many threads we have\n self.central_ip = ip\n join_message = message.Message('pc', self.avail_threads)\n self.send_message(join_message)", "def connect_thread():\n return factory.connect_thread(SlaveService, remote_service = SlaveService)", "def start(self, wait_for_port=False):\n for c in self.openstack_endpoints.values():\n c.compute = self.compute\n c.manage = self.manage\n c.server_thread = threading.Thread(target=c._start_flask, args=())\n c.server_thread.daemon = True\n c.server_thread.name = c.__class__.__name__\n c.server_thread.start()\n if wait_for_port:\n self._wait_for_port(c.ip, c.port)", "def runNodesMessage(self):\n while True:\n for neighbour in self.nextIP:\n socketNodes = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while True:\n try:\n socketNodes.connect((neighbour, 5003))\n socketNodes.send(self.message)\n break\n except TimeoutError:\n pass\n except ConnectionRefusedError:\n pass\n socketNodes.close()\n break", "def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()", "def peer_server_listener(self):\n try:\n peer_server_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n peer_server_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n peer_server_socket.setsockopt(\n socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n peer_server_host = self.peer.peer_hostname\n peer_server_port = self.peer.peer_port\n peer_server_socket.bind(\n (peer_server_host, peer_server_port))\n peer_server_socket.listen(10)\n while True:\n conn, addr = peer_server_socket.accept()\n conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n #print \"Got connection from %s on port %s\" \\\n # % (addr[0], addr[1])\n self.peer_server_listener_queue.put((conn,addr))\n except Exception as e:\n print \"Peer Server Listener on port Failed: %s\" % e\n sys.exit(1)", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def start_processes(self):\n self.relay = subprocess.Popen(['ntlmrelayx.py', '-6', '-tf', self.targets_file, '-w', '-l', self.directory, '-of', self.output_file], cwd=self.directory)\n self.responder = subprocess.Popen(['responder', '-I', self.interface_name])", "def _process_requests_in_background(self):\n while True:\n readable, writable, exceptional = self._bg_select_peers()\n\n for peer in readable:\n data = peer.socket.recv(RECV_BYTES)\n if data:\n peer.incoming_buffer.feed(data)\n try:\n response = peer.incoming_buffer.unpack()\n except msgpack.OutOfData:\n continue\n peer.handle_response(response)\n else:\n self._bg_clean_up_peer(peer)\n if peer in writable:\n writable.remove(peer)\n if peer in exceptional:\n exceptional.remove(peer)\n\n for peer in writable:\n # single-reader configuration means we can safely unlock between\n # peeking and committing.\n with peer.lock:\n next_bytes = peer.outgoing_buffer.peek(SEND_BYTES)\n if not next_bytes:\n continue\n\n sent_bytes = peer.socket.send(next_bytes)\n if sent_bytes == 0:\n self._bg_clean_up_peer(peer)\n if peer in exceptional:\n 
exceptional.remove(peer)\n continue\n\n with peer.lock:\n peer.outgoing_buffer.commit_read(sent_bytes)\n\n for peer in exceptional:\n self._bg_clean_up_peer(peer)", "def run_forever(self, *args, **kwargs):\n try:\n self.logger.debug('Begin account update')\n\n # get account-updater server ownership\n self.get_ownership_obj = threading.Thread(target = self.msg.get_my_ownership)\n self.get_ownership_obj.setDaemon(True)\n self.get_ownership_obj.start()\n\n self.walker_obj = Walker(self.walker_map, self.__param, self.logger)\n self.walker_obj.setDaemon(True)\n self.walker_obj.start()\n self.logger.info(\"Walker Started\")\n self.reader_obj = Reader(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.reader_obj.setDaemon(True)\n self.reader_obj.start() \n self.logger.info(\"Reader Started\")\n self.account_sweeper = AccountSweep(self.__param, self.logger)\n self.account_sweeper.setDaemon(True)\n self.account_sweeper.start()\n self.logger.info(\"Account Sweeper Started\") \n self.updater_obj = Updater(self.walker_map, self.reader_map, \\\n self.__param, self.logger)\n self.updater_obj.setDaemon(True)\n self.updater_obj.start() \n self.logger.info(\"Updater Started\") \n self.container_sweeper = ContainerSweeper(self.walker_map, \\\n self.reader_map, self.__param, self.logger)\n self.container_sweeper.setDaemon(True)\n self.container_sweeper.start()\n self.logger.info(\"Container Sweeper Started\") \n\n account_updater_server = ThreadedAccountUpdaterServer(\\\n (self.__get_node_ip(gethostname()), \\\n self.__account_updater_port), HttpListener)\n account_updater_server.serve_forever()\n except Exception as ex:\n self.logger.error(\"Exception occured: %s\" % ex)", "def start(self):\n self.thread.start()", "def handle_clients(self):\n done = False\n while not done:\n try:\n # starts threads\n receive_video_client_socket, address = \\\n self.receive_video_socket.accept()\n print(\"connected relay video: {}\"\n .format(receive_video_client_socket))\n video_thread = \\\n threading.Thread(target=self.start_video_relay,\n args=(receive_video_client_socket, ))\n audio_thread = threading.Thread(target=self.start_audio_relay)\n video_thread.start()\n audio_thread.start()\n\n except socket.error as msg:\n print(\"socket failure handle clients: \", msg)\n done = True\n except Exception as msg:\n print(\"exception handle clients: \", msg)\n done = True", "def start(self) -> None:\n\n while True:\n timetable = []\n for bot in self.bots:\n timetable.append(bot.next_send_time)\n\n val, idx = min((val, idx) for (idx, val) in enumerate(timetable))\n sleep_time = val - time.time()\n\n if sleep_time > 0:\n print(f\"Sleep for: {sleep_time}\")\n time.sleep(sleep_time)\n self.bots[idx].start()", "def testSocketThreads(port, distanthost, distantport):\n sock = socket.socket()\n sock.bind(('', port))\n sock.listen(5)\n print(\"Server started on port\", port)\n while 1:\n try:\n clientsock, clientaddress = sock.accept()\n except KeyboardInterrupt:\n break\n print(\"Accepting new connection from\", clientaddress)\n distantsock = socket.socket()\n distantsock.connect((distanthost, distantport))\n t = threading.Thread(target=handleOneConnection, args=(clientsock,distantsock))\n t.name = \"ThreadFor{}\".format(clientaddress)\n t.start()\n print('End of server')\n sock.close()", "def start(self):\n if config['port'] or config['host']:\n port = config['port'] or 5222\n host = config['host'] or sleekxmpp.JID(config['jid']).host\n addr = (host, port)\n else:\n addr = tuple()\n self.connect(addr)\n 
self.process(threaded=True)", "def startListener(self):\n self.send_conn = None\n self.recv_conn = None\n listener = threading.Thread(target=self.listen, args=(self.recv_conn,))\n sender = threading.Thread(target=self.send, args=(self.send_conn,))\n listener.daemon = True # setting daemon to true means threads wont stop program from closing\n sender.daemon = True\n listener.start()\n sender.start()", "def start_all(self):\n for proc in self.get_all():\n proc.start()", "def run(self):\n client = ProcessorClient()\n try:\n client.connect(self.address)\n except Exception as e:\n self.error = e\n logging.error(e)\n else:\n self.clients[self.name] = client", "def run(self):\n inputs = [self.server]\n\n while self.running:\n print '1'\n try:\n readable, writeable, exceptional = \\\n select.select(inputs, [], [])\n except select.error, e:\n print 'select:error[%s]' % e.message\n break\n\n for sock in readable:\n print '2'\n if sock == self.server:\n client, address = self.server.accept()\n client.setblocking(0)\n inputs.append(client)\n # self.outputs.append(client)\n\n print 'Client[%s] connected!' % address[0]\n self.clients[client] = address[0]\n\n else:\n print '3'\n self.recv_data(sock)", "def run(self):\n self.running = True\n for channel in self:\n sleep(self.hop_interval)\n if self.running is False:\n return\n self.hop_channel(channel)", "def _run(self):\n #print(\"try to connect run\")\n while True:\n self._connect()\n while not self.connected and self.auto_retry is not None:\n gevent.sleep(self.auto_retry)\n self._connect()\n if self.connected:\n self.run()\n if self.auto_retry is None:\n break", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]", "def __wait_for_master_ssh( self ):\n for _ in itertools.count( ):\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n try:\n s.settimeout( 5 )\n s.connect( ('mesos-master', 22) )\n return\n except socket.error:\n pass\n finally:\n s.close( )", "def start_publishers(self):\n for publisher in self._publishers:\n publisher.start()", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()", "def run_channels():\n\n # Run channel encoders\n for c in channels:\n threads[c] = threading.Thread()\n threads[c].name = c\n threads[c].run = channels[c].run\n threads[c].start()\n \n time.sleep(0.5)\n print()", "def connect_peers(self):\n assert self.targets\n # connect closest node to target id\n for t in self.targets:\n if len(self.connections) >= self.max_peers:\n break\n for knode in self.proto.routing.neighbours(devp2p.kademlia.Node.from_id(t['address'])):\n assert isinstance(knode, devp2p.kademlia.Node)\n # assure within tolerance\n if knode.id_distance(t['address']) < t['tolerance']:\n # make sure we are not connected yet\n remote = self.network[knode.id]\n if remote not in self.connections:\n if remote.receive_connect(self):\n t['connected'] = True\n self.connections.append(remote)\n break", "async def connect_to_peers_loop(peer_pool, nodes):\n while peer_pool.is_operational:\n try:\n if not 
peer_pool.is_full:\n await peer_pool.connect_to_nodes(nodes)\n await peer_pool.wait(asyncio.sleep(2))\n except OperationCancelled:\n break" ]
[ "0.67120516", "0.65352994", "0.649779", "0.64329314", "0.6382283", "0.62863195", "0.6221034", "0.62145627", "0.6192615", "0.6154389", "0.61105305", "0.60992175", "0.6057519", "0.6056828", "0.60463715", "0.60429364", "0.6019063", "0.5967418", "0.59653294", "0.595958", "0.5956205", "0.5934227", "0.59222454", "0.59157044", "0.59110254", "0.59083", "0.59042066", "0.587732", "0.5876997", "0.5876094", "0.5860798", "0.5842127", "0.5839878", "0.5835118", "0.58209187", "0.58186156", "0.58146757", "0.58140594", "0.5786698", "0.57828385", "0.57800776", "0.5772562", "0.57629895", "0.57488877", "0.57256174", "0.5717011", "0.5716651", "0.57161593", "0.5705147", "0.57044506", "0.57044506", "0.5701512", "0.5694531", "0.5690259", "0.5681617", "0.5681467", "0.56748426", "0.5664375", "0.56625885", "0.5650296", "0.5645962", "0.5636468", "0.5631921", "0.5631228", "0.5629434", "0.56179357", "0.55978405", "0.5591133", "0.5589276", "0.5581131", "0.5575006", "0.55716485", "0.5565192", "0.55581546", "0.55518544", "0.5547271", "0.5519767", "0.55128556", "0.55126923", "0.5512306", "0.5495378", "0.54931617", "0.54891574", "0.5484849", "0.54804707", "0.5480047", "0.5476405", "0.54588634", "0.54542094", "0.5453249", "0.5449869", "0.54456735", "0.5443187", "0.54363614", "0.5432944", "0.54259515", "0.5425162", "0.5421208", "0.5414936", "0.54138386" ]
0.7830526
0
Get the network delay between pid1 and pid2
def get_delay(self, pid1, pid2, is_block): is_slow = self.node_is_slow[pid1] or self.node_is_slow[pid2] p = random.uniform(Parameters.p_min, Parameters.p_max) c = Parameters.c_low if is_slow else Parameters.c_high m = Parameters.m if is_block else 0 d = random.expovariate(c / Parameters.d) return (p + m/c + d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTimeDelay(*args):\n return args[0].TimeState.TimeDelay.time_delay", "def get_delay(self, src, dst):\n try:\n fwd_delay = self.awareness.graph[src][dst]['lldpdelay']\n re_delay = self.awareness.graph[dst][src]['lldpdelay']\n src_latency = self.echo_latency[src]\n dst_latency = self.echo_latency[dst]\n \n delay = (fwd_delay + re_delay - src_latency - dst_latency)/2\n return max(delay, 0)\n except:\n return float('inf')", "def network_delay(self):\r\n return (self.node_monitor_submit_time - self.clock_skew -\r\n self.scheduler_launch_time)", "def communication_delay(self, begin, end):\n\n duration, path = self.movement_model.shortest_distance(begin, end)\n path_clusters = self.count_clusters(path)\n\n segment_speed_pairs = list()\n path_index = 0\n last_segment = None\n for path_cluster in path_clusters:\n segments = list()\n if last_segment:\n segments.append(last_segment)\n\n while path[path_index] in path_cluster.tour.objects:\n segments.append(path[path_index])\n last_segment = path[path_index]\n\n path_index += 1\n if path_index >= len(path):\n break\n\n segment_speed_pairs.append((path_cluster.mdc_speed, segments))\n\n travel_delay = 0. # * pq.second\n for speed, segments in segment_speed_pairs:\n cluster_distance = 0 # * pq.meter\n start_segment = segments[0]\n for end_segment in segments[1:]:\n distance = np.linalg.norm(\n start_segment.location.nd - end_segment.location.nd)\n cluster_distance += distance\n\n travel_delay += cluster_distance / speed\n\n transmission_delay = len(path_clusters)\n transmission_delay *= data.segment_volume(begin, end, self.env)\n transmission_delay /= self.env.comms_rate\n\n relay_delay = self.holding_time(path_clusters[1:])\n\n total_delay = travel_delay + transmission_delay + relay_delay\n return total_delay", "def ping_delay(host):\r\n ret = connections.execute_shell_command_without_check('ping %s' % host)\r\n lines = ret.splitlines()\r\n for line in lines:\r\n if line.find('Minimum') > 0:\r\n result = re.search('^.*Minimum = (\\d*)ms, Maximum = (\\d*)ms, Average = (\\d*)ms', line)\r\n return result.groups()", "def network_delays(self):\r\n network_delays = []\r\n for task in self.__tasks.values():\r\n if task.complete():\r\n network_delays.append(task.network_delay())\r\n if task.network_delay() > 20:\r\n print \"Network delay over 20ms for %s\" % self.__id\r\n #print task.node_monitor_submit_time\r\n #print task.scheduler_launch_time\r\n #print task.clock_skew\r\n #print task.id\r\n #print task.address\r\n #print\r\n return network_delays", "def networkDelayTime(self, times, n: int, src: int) -> int:\n import heapq\n # The tuple is of form <distance, Node, parent>\n pq = [(0, src, -1)]\n graph = defaultdict(list)\n\n # Graph Built\n for u, v, w in times:\n graph[u].append([v, w])\n \n dist = {}\n parent = {}\n heapq.heapify(pq)\n\n while pq:\n dst, node, prnt = heapq.heappop(pq)\n if node in dist:\n continue\n dist[node] = dst\n parent[node] = prnt\n for nbr, wt in graph[node]:\n if nbr not in dist:\n heapq.heappush([wt+dst, nbr, node])\n \n return max(dist.values()) if len(dist) == n else -1", "def get_delay(self):\n if self.msg_tn == self.last_msg_tn:\n return\n with self.lock:\n if not self.delays:\n return\n n = len(self.delays)\n\n mean = sum(self.delays) / n\n std_dev = math.sqrt(sum((x - mean)**2 for x in self.delays) / n)\n\n max_delta = max(self.delays)\n min_delta = min(self.delays)\n\n self.last_msg_tn = self.msg_tn\n return mean, min_delta, max_delta, std_dev, n", "def network_delays(self):\r\n network_delays = []\r\n for task 
in self.__tasks.values():\r\n if task.complete():\r\n network_delays.append(task.network_delay())\r\n if task.network_delay() > 20:\r\n print \"Long launch %s\" % self.__id\r\n print task.node_monitor_submit_time\r\n print task.scheduler_launch_time\r\n print task.id\r\n print task.address\r\n print\r\n return network_delays", "def processsleepcycle(str1, str2, sleep_min):\n start_sleep = int(str1[15:17])\n end_sleep = int(str2[15:17])\n for x in range(start_sleep, end_sleep):\n sleep_min.append(x)\n return sleep_min", "def get_delay(self):\n if self.msg_tn == self.last_msg_tn:\n return\n if not self.delays:\n return\n n = len(self.delays)\n\n mean = sum(self.delays) / n\n std_dev = math.sqrt(sum((x - mean)**2 for x in self.delays) / n)\n\n max_delta = max(self.delays)\n min_delta = min(self.delays)\n\n self.last_msg_tn = self.msg_tn\n return mean, min_delta, max_delta, std_dev, n", "def DDdelay(self):\n return self.delayInverse()+self.delayS()+self.delayA()", "def get_delay(list_of_buses):\n #print('Inside delay:', list_of_buses)\n # Iterates over all the buses in the list and gets the delay for each bus\n for i in list_of_buses:\n # Calculate the delay, by first spliting the string (HH:MM:SS), into hours, minutes and seconds, then converting it to\n # ints and calculating the seconds. Does it for both the scheduled arrival time and the actual predicted arrival time\n scheduled_hours, scheduled_minutes, scheduled_seconds = i['scheduledarrivaldatetime'][11:].split(':')\n scheduled_arrival_seconds = int(scheduled_hours) * 3600 + int(scheduled_minutes) * 60 + int(scheduled_seconds)\n actual_hours, actual_minutes, actual_seconds = i['arrivaldatetime'][11:].split(':')\n actual_arrival_seconds = int(actual_hours) * 3600 + int(actual_minutes) * 60 + int(actual_seconds)\n\n # Calculate the delay\n delay = actual_arrival_seconds - scheduled_arrival_seconds\n\n # Left these prints here just in case we want to visually compare the times\n # print('Scheduled arrival time:', i['scheduledarrivaldatetime'], 'Actual arrival time:', i['arrivaldatetime'])\n # print(delay)\n\n # Append the delay to the dictionary that represents each bus, with key 'delay'\n i['delay'] = delay", "def d_delay_d_par(self,par):\n return self.d_delayI_d_par(par)+self.d_delayS_d_par(par)+ \\\n self.d_delayA_d_par(par)", "def pidGet(self) -> float:\n ...", "def pidGet(self) -> float:\n ...", "def delay_between(h1, h2):\n h1 = np.atleast_2d(h1)\n h2 = np.atleast_2d(h2)\n assert h1.shape[-1] == h2.shape[-1], \"h1 and h2 must have same number of samples\"\n\n L = h1.shape[-1]\n\n delay = np.zeros((h1.shape[0], h2.shape[0]), dtype=int)\n for i in range(h1.shape[0]):\n for j in range(h2.shape[0]):\n xcorrmax = np.argmax(np.correlate(h2[j], h1[i], mode=\"full\"))\n delay[i, j] = xcorrmax - L + 1\n\n return delay.squeeze()", "def dns_latency(self, run_test):\n\n if not run_test:\n return\n\n dig_res = None\n\n target = '8.8.8.8'\n\n if 'target' in self.nma.conf['dns_latency'].keys():\n target = self.nma.conf['dns_latency']['target']\n\n dig_delays = []\n\n for site in self.sites:\n dig_cmd = f'dig @{target} {site}'\n dig_res = Popen(dig_cmd, shell=True,\n stdout=PIPE).stdout.read().decode('utf-8')\n\n dig_res_qt = re.findall('Query time: ([0-9]*) msec',\n dig_res, re.MULTILINE)[0]\n dig_delays.append(int(dig_res_qt))\n\n self.results[\"dns_query_avg_ms\"] = sum(dig_delays) / len(dig_delays)\n self.results[\"dns_query_max_ms\"] = max(dig_delays)\n\n if not self.quiet:\n print(f'\\n --- DNS Delays (n = {len(dig_delays)}) ---')\n 
print(f'Avg DNS Query Time: {self.results[\"dns_query_avg_ms\"]} ms')\n print(f'Max DNS Query Time: {self.results[\"dns_query_max_ms\"]} ms')\n\n return dig_res", "def Time_Delay():\n Delay=[1]\n return Delay", "def receive_and_probing_time(self):\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n\t\t \t if probe.complete():\r\n\t\t\t \t\t latest_completion = max(latest_completion, probe.completion_time)\r\n return latest_completion - self.__arrival_time", "def calculate_rtt(start, stop):\n global RTT\n # Recommended α: 0.8-0.9 (0.875 for most TCPs)\n alpha = .875\n sample = stop - start\n new_rtt = (alpha * RTT) + ((1 - alpha) *sample)\n RTT = new_rtt\n sock.settimeout(math.ceil(2 * RTT))", "def create_link_delay(self):\n try:\n for src in self.awareness.graph:\n for dst in self.awareness.graph[src]:\n if src == dst:\n self.awareness.graph[src][dst]['delay'] = 0\n continue\n delay = self.get_delay(src, dst)\n delay = round(delay*10**3,4)\n self.awareness.graph[src][dst]['delay'] = delay\n except:\n if self.awareness is None:\n self.awareness = lookup_service_brick('awareness')\n return", "def __cpu_time_deltas(sample_duration):\n \n with open(Path.proc_stat()) as f1:\n with open(Path.proc_stat()) as f2:\n line1 = f1.readline()\n time.sleep(sample_duration)\n line2 = f2.readline()\n \n deltas = [int(b) - int(a) for a, b in zip(line1.split()[1:], line2.split()[1:])]\n \n return deltas", "def calculate_delay(self, velocity):\n if velocity > 0:\n rps = velocity / 1.444\n delay = (1 / (200 * float(rps))) / 2\n return delay\n else:\n return 0", "def get_time(network, road_id):\n return network[0][road_id][4]", "def _compute_scaled_network_transfer_delay(data_type):\n _check_scaled_network_keywords(data_type=data_type)\n transfer_delay = []\n if data_type in load_keywards:\n load_list = [10, 15, 20, 25]\n num_nodes_list = np.arange(100, 11100, 1000)\n elif data_type in scale_keywords:\n load_list = np.arange(1, 44, 1)\n num_nodes_list = [512, 1280, 5120, 10240]\n for j in range(len(num_nodes_list)):\n num_nodes = num_nodes_list[j]\n length = num_nodes * 1.2\n network = Network(length=length, num_nodes=num_nodes)\n model = Model(network=network)\n _transfer_delay = []\n for i in range(len(load_list)):\n model.constants['average_bit_rate'] = load_list[i]\n simulator = BaseSimulator(convergence=True, model=model)\n _transfer_delay.append(get_transfer_delay(simulator=simulator) / 1000)\n if np.isinf(_transfer_delay[-1]):\n _transfer_delay[-1] = np.nan\n transfer_delay.append(_transfer_delay)\n if data_type in load_keywards:\n x = num_nodes_list\n y = list(map(list, zip(*transfer_delay)))\n labels = [str(item) + \"\\%\" for item in load_list]\n elif data_type in scale_keywords:\n x = load_list\n y = transfer_delay\n labels = [str(item) for item in num_nodes_list]\n return x, y, labels", "def delayToNextPacket(self):\n delay = -(1.0 / (self.mPacketsPerSecond)) * np.log(1 - np.random.uniform())\n # exponential distribution in seconds\n return round(delay * Constants.TICKS_PER_SECOND)\n #return (Math.round(delay * Main.TICKS_PER_SECOND))", "def getDelay(self, *args):\n return _libsbml.Event_getDelay(self, *args)", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * self.delay_multiplier)\n return delay", "def get_net(netsnt, netrcv, tempsnt, temprcv):\n return {\n 'NET-PKT-SNT': (netsnt - tempsnt),\n 'NET-PKT-RCV': (netrcv - temprcv),\n }", "def exchange_timestamps_thread(other_uuid: str, other_ip: str, other_tcp_port: int):\n print_yellow(f\"ATTEMPTING TO CONNECT TO 
{other_uuid}\")\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n client.connect((other_ip, other_tcp_port))\n data = client.recv(1024)\n except ConnectionError:\n print_red(\"connection refused\") \n exit()\n timestamp = struct.unpack(\"!d\", data)\n # calculate delay\n my_timestamp = datetime.datetime.utcnow().timestamp()\n delay = my_timestamp - timestamp[0]\n\n if other_uuid in neighbor_information:\n neighbor_information[other_uuid].delay = delay\n # neighbor_information[other_uuid].last_timestamp = timestamp[0]\n\n else:\n neighbour_node = NeighborInfo(delay, other_ip, other_tcp_port)\n neighbor_information.update({other_uuid: neighbour_node})\n\n client.close()", "def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset", "def distTip(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode' + str(id1))\r\n polydata = node.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode' + str(id2))\r\n polydata2 = node2.GetPolyData()\r\n p, pbis = [0, 0, 0], [0, 0, 0]\r\n p2 = [0, 0, 0]\r\n p2bis = [0, 0, 0]\r\n tipDistance = []\r\n for i in range(100):\r\n polydata.GetPoint(i, p)\r\n polydata.GetPoint(polydata.GetNumberOfPoints()-1 - i, pbis)\r\n if pbis[2] > p[2]:\r\n p = pbis\r\n polydata2.GetPoint(i, p2)\r\n polydata2.GetPoint(polydata2.GetNumberOfPoints()-1 - i, p2bis)\r\n if p2bis[2] > p2[2]:\r\n p2 = p2bis\r\n tipDistance.append(((p2[0] - p[0]) ** 2 + (p2[1] - p[1]) ** 2 + (p2[2] - p[2]) ** 2) ** 0.5)\r\n return min(tipDistance)", "def __compute_delay(\n arrival_time : Time, \n next_departure_time : Time, \n ttype : str, \n previous_route_id : str,\n next_route_id : str\n ) -> Time:\n # Allowed delay is simply the difference between next departure and current arrival time\n time_to_make_connection = next_departure_time - arrival_time\n if (previous_route_id != next_route_id): # if we are changing route add additional delay\n time_to_make_connection -= EXTRA_TRANSFER_TIME[ttype] # decrease of allowed connection time\n return time_to_make_connection", "def packet_arrival():\r\n return 1.0", "def stack_ps(ps1, ps2, keep_unique = False, fill_time = False, message = True):\n # create deepcopies to avoid changing original instances\n \n ps1 = copy.deepcopy(ps1)\n ps2 = copy.deepcopy(ps2)\n \n # create datetime information in PS instances\n \n try:\n _ = getattr(ps1, \"datetime\")\n except AttributeError:\n ps1.createTimeDate()\n \n try: \n _ = getattr(ps2, \"datetime\")\n except AttributeError:\n ps2.createTimeDate()\n \n # check time resolutions\n res1 = (dt.datetime.strptime(ps1.datetime['data'][1], ps1.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])).seconds\n res2 = (dt.datetime.strptime(ps2.datetime['data'][1], ps2.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])).seconds\n \n if abs(res1-res2) > 60:\n if message:\n print( (\"warning: resolutions differ %d seconds\")%(abs(res1-res2)) )\n \n # check if ps1 is \"older\" than ps2\n \n reversed_order = False\n cut = None\n \n if dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units']) < dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']):\n # ps2 starts after ps1 ends\n timediff = (dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units']) - dt.datetime.strptime(ps1.datetime['data'][-1], ps1.datetime['units'])).total_seconds()\n elif 
dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units']) < dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']):\n # ps1 starts after ps2 ends (user has inadvertently switched the order of the instances)\n reversed_order = True\n timediff = (dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units']) - dt.datetime.strptime(ps2.datetime['data'][-1], ps2.datetime['units'])).total_seconds()\n else:\n # yikes! The particle sizer instances have overlapping data\n # it is assumed that ps2 data replaces ps1 data starting \n # from the overlapping time\n cut, cutdate = tt.findNearestDate(ps1.datetime['data'], ps2.datetime['data'][0]) \n fill_time = False\n \n #print(timediff, 1.5*res1)\n # check if filling is required\n if fill_time is True:\n # check time difference\n if reversed_order:\n # ps1 starts after ps2 ends\n if timediff > 1.5*res2:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res2))\n base = dt.datetime.strptime(ps1.datetime['data'][0], ps1.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res2*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:]))# because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps2.datetime['units']) for dl in date_list]\n ps2.datetime['data'] = np.append(ps2.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps2.time['units']) for dl in date_list]\n ps2.time['data'] = np.append(ps2.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps2.date['units']) for dl in date_list]\n ps2.date['data'] = np.append(ps2.date['data'], datelist)\n else:\n fill_time = False\n else:\n if timediff > 1.5*res1:\n # the time gap between two instances has to be\n # larger than twice the normal resolution\n numdates = int(np.ceil(timediff/res1))\n base = dt.datetime.strptime(ps2.datetime['data'][0], ps2.datetime['units'])\n date_list = [base - dt.timedelta(seconds=res1*x) for x in range(numdates)]\n date_list = list(reversed(date_list[1:])) # because numdates starts at 0, first date on date_list is the same as the startdate from the second instance\n datetimelist = [dt.datetime.strftime(dl, ps1.datetime['units']) for dl in date_list]\n ps1.datetime['data'] = np.append(ps1.datetime['data'], datetimelist)\n timelist = [dt.datetime.strftime(dl, ps1.time['units']) for dl in date_list]\n ps1.time['data'] = np.append(ps1.time['data'], timelist)\n datelist = [dt.datetime.strftime(dl, ps1.date['units']) for dl in date_list]\n ps1.date['data'] = np.append(ps1.date['data'], datelist)\n else:\n fill_time = False\n \n if message:\n print(\"reversed order:\", reversed_order)\n # check which attributes are similar in both instances\n if reversed_order:\n # ps1 starts after ps2 ends\n new_ps = copy.deepcopy(ps2)\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n \n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps2.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps2.data[var]['data'],add,axis=1)\n ps2.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + 
sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st21:st22, 0:ps2.data[var]['data'][:,:cut].shape[1]] = ps2.data[var]['data'][:,:cut]\n new_field[st11:st12, ps2.data[var]['data'][:,:cut].shape[1]:] = ps1.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps2[:cut],add), data_ps1)\n else:\n afield['data'] = np.append(data_ps2[:cut], data_ps1)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps1,attribute)\n newattribute['time'] = ps1['datetime']['data']\n setattr(new_ps, attribute, newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n \n else:\n # ps2 starts after ps1 ends\n new_ps = copy.deepcopy(ps1)\n for attribute in ps2.__dict__.keys():\n if attribute in ps1.__dict__.keys():\n afield = getattr(new_ps, attribute)\n if attribute == 'diameter':\n st11, st12, st21, st22, diamlist = check_diameters(ps1.diameter['data'], ps2.diameter['data'], ps1.instrument_type)\n for var in new_ps.data['variables']:\n if fill_time is True:\n add = np.ma.zeros((ps1.data[var]['data'].shape[0],len(date_list))) \n add[:] = np.nan\n newdata = np.append(ps1.data[var]['data'],add,axis=1)\n ps1.data[var]['data'] = newdata\n \n sh1 = ps1.data[var]['data'].shape\n sh2 = ps2.data[var]['data'].shape\n newfields = (len(diamlist) ,sh1[1] + sh2[1])\n new_field = np.ma.zeros(newfields)\n new_field[:] = np.ma.masked\n \n new_field[st11:st12, 0:ps1.data[var]['data'][:,:cut].shape[1]] = ps1.data[var]['data'][:,:cut]\n new_field[st21:st22, ps1.data[var]['data'][:,:cut].shape[1]:] = ps2.data[var]['data']\n \n new_ps.data[var]['data'] = new_field\n \n afield['data'] = diamlist\n \n elif attribute == 'data':\n # data has been appended with diameters\n pass\n else:\n try:\n field_ps2 = getattr(ps2, attribute)\n field_ps1 = getattr(ps1, attribute)\n except TypeError:\n if attribute == 'header':\n pass\n else:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n try:\n data_ps2 = field_ps2['data']\n data_ps1 = field_ps1['data']\n if attribute in ['date', 'datetime', 'time']: # these have already been extended with the correct data\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n elif fill_time:\n add = np.ma.zeros(len(date_list))\n add[:] = np.nan\n afield['data'] = np.append(np.append(data_ps1[:cut],add), data_ps2)\n else:\n afield['data'] = np.append(data_ps1[:cut], data_ps2)\n except:\n if message:\n print( (\"Could not append %s attribute\")%(attribute) )\n \n else:\n if keep_unique:\n newattribute = getattr(ps2,attribute)\n newattribute['time'] = ps2['datetime']['data']\n setattr(new_ps, 
attribute,newattribute)\n else:\n pass\n if keep_unique is False:\n # get rid of attributes which were in ps2 but not in ps1\n for attribute in ps1.__dict__.keys():\n if attribute in ps2.__dict__.keys():\n pass\n else:\n delattr(new_ps, attribute)\n \n new_ps.sample['data'] = np.arange(1.0, len(new_ps.datetime['data'])+1)\n new_ps.instrument_type = ps1.instrument_type.split('_')[0] + '_concatenated'\n \n if message:\n print('filltime: ', fill_time)\n \n return new_ps", "def calculate_delay(self, wav_start, wav_finish, thr_start, thr_finish):\n\n w_s=self.find_nearest_wav(wav_start)\n w_f=self.find_nearest_wav(wav_finish)\n temp=self.pre_proc_data.loc[:,w_s]\n t_start = self.times[(temp.values>thr_start).argmax()]\n print(t_start)\n\n temp2=self.pre_proc_data.loc[:,w_f]\n dx=temp2.diff()\n dx_clean=dx.ewm(span = 50).mean()\n t_finish=self.times[np.min(np.where(dx_clean<thr_finish))]\n print(t_finish)\n\n plt.subplot(211)\n plt.plot(temp,label='{}nm'.format(wav_start))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.subplot(212)\n plt.plot(temp2,label='{}nm'.format(wav_finish))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.show()\n\n self.t_delay=np.round(t_finish-t_start,2)\n return np.round(t_finish-t_start,2)", "def queryDelay(ants=0) :\n antlist=makeAntList(ants)\n print \"Current delays\"\n for a in antlist :\n mp = \"Control.Antenna%d.delayOffset3mmRx\" %a\n try :\n print \"delay(%7.3f, %2d)\" %(queryDouble(mp, retries=0), a)\n except Exception: \n print \"C%d\" %a, \"does not have a value or is invalid\"", "def return_delay_time(self):\n return self._read(MX_RETURN_DELAY_TIME)", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def DelayAfterTransmit(self):\n\t\treturn self._get_attribute('delayAfterTransmit')", "def överlapp(tp1, tp2):\r\n\t# Dålig implementation, bryter mot abstraktionen. 
-- Fixed\r\n\tdt1 = senaste_klockslag(start_klockslag(tp1), slut_klockslag(tp1))\r\n\tdt2 = tidigaste_klockslag(slut_klockslag(tp2), slut_klockslag(tp2))\r\n\treturn ('tidsperiod', (dt1, dt2)) # \"lite\" fräschare\r", "def _highwireDelay(self, url):\n hostname = urlparse.urlsplit(url)[1]\n for hostKey, delaySec in pubConf.highwireDelayOverride.iteritems():\n if hostKey in hostname:\n logging.debug('Overriding normal Highwire delay with %d secs as specified in conf' % delaySec)\n return delaySec\n\n os.environ['TZ'] = 'US/Eastern'\n if hasattr(time, 'tzset'):\n time.tzset()\n tm = time.localtime()\n if tm.tm_wday in (5, 6):\n delay = 5\n elif tm.tm_hour >= 9 and tm.tm_hour <= 17:\n delay = 60\n else:\n delay = 10\n logging.log(5, 'current highwire delay time is %d' % delay)\n return delay", "def PortDelayUnit(self):\n\t\treturn self._get_attribute('portDelayUnit')", "def get_process_speed(self, pid):\n raise NotImplementedError()", "def ping_remote_system(host, options=\"\", ignore=\"\"):\r\n connection_type = connections.get_current_connection_type()\r\n\r\n## option_list = ''\r\n## for option in options:\r\n## option_list = option_list + option + ' '\r\n ret = connections.execute_shell_command_without_check('ping %s %s' % (options, host))\r\n time_delay = []\r\n delay_max_min_avg = []\r\n statistic_delay = []\r\n if connection_type == 'Windows':\r\n lines = ret.split(os.linesep)\r\n delay_pattern1 = re.compile(r'(?i).*Reply from.*?time=(\\d+)ms.*')\r\n delay_pattern2 = re.compile(r'(?i).*Reply from.*?time<(\\d+)ms.*')\r\n for line in lines:\r\n if re.match(r'(?i).*Reply from.*?time.*(\\d+)ms.*', line):\r\n time = delay_pattern1.match(line)\r\n if time:\r\n tmp = time.groups()[0]\r\n time_delay.append(int(tmp))\r\n else:\r\n time = delay_pattern2.match(line)\r\n if time:\r\n tmp = time.groups()[0]\r\n time_delay.append(int(tmp))\r\n else:\r\n time_delay.append(-1)\r\n else:\r\n result = re.search('.*Sent\\s*=\\s*(\\d*).*Received\\s*=\\s*(\\d*).*Lost\\s*=\\s*(\\d*)', line)\r\n if result:\r\n summary_result = result.groups()\r\n print \"Total ping delay is:\",time_delay\r\n\r\n if \"\"==ignore:\r\n statistic_delay = time_delay\r\n else:\r\n (start,end) = ignore.split(\":\")\r\n if \"\"==start:\r\n start = 0\r\n else:\r\n start = int(start)\r\n\r\n if \"\"==end:\r\n end = len(time_delay)\r\n else:\r\n end = int(end)\r\n\r\n for i in range(start,end):\r\n statistic_delay.append(time_delay[i])\r\n print \"Statistic ping delay is:\",statistic_delay\r\n\r\n invalid = statistic_delay.count(-1)\r\n for i in range(invalid):\r\n statistic_delay.remove(-1)\r\n print \"Valid ping delay is:\",statistic_delay\r\n\r\n\r\n if 0 < len(statistic_delay):\r\n list_max = max(statistic_delay)\r\n list_min = min(statistic_delay)\r\n list_avg = float(sum(statistic_delay))/float(len(statistic_delay))\r\n delay_max_min_avg.append(list_max)\r\n delay_max_min_avg.append(list_min)\r\n delay_max_min_avg.append(list_avg)\r\n else:\r\n delay_max_min_avg = [0,0,0]\r\n\r\n if connection_type == 'Linux':\r\n if line.find('packets') > 0:\r\n '4 packets transmitted, 0 received, 100% packet loss, time 3000ms'\r\n result = re.search('(\\d+)\\s*packets transmitted.*(\\d+)\\s*received', line)\r\n\r\n return (summary_result, delay_max_min_avg)", "def comm_times_single(ns, send_host, recv_host):\n\n return run_on_hosts((send_host, recv_host),\n '''python %sape/timings/communication/mpi_run_single.py \"%s\" %s %s'''%(\n ape_dir, str(ns), send_host, recv_host))", "def exchange_timestamps_thread(other_uuid: str, other_ip: str, 
other_tcp_port: int):\n print_yellow(f\"ATTEMPTING TO CONNECT TO {other_uuid}\")\n address = (other_ip, other_tcp_port)\n tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp_socket.connect(address)\n received_time_stamp = struct.unpack('!f', tcp_socket.recv(4096))[0]\n current_timestamp = datetime.datetime.now().replace(tzinfo=timezone.utc).timestamp()\n\n delay = current_timestamp - received_time_stamp\n # print_red(f'Current: [{current_timestamp}] ==> Received [{received_time_stamp}]')\n print_red(f'[{other_uuid}] Current delay {abs(delay)}')\n node = NeighborInfo(delay, 1, other_ip, other_tcp_port)\n neighbor_information.update({other_uuid: node})\n\n pass", "def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r", "def get_speed(network, road_id):\n return network[0][road_id][3]", "def PortDelayValue(self):\n\t\treturn self._get_attribute('portDelayValue')", "def get_timeout(self) -> int:", "def duration(self):\r\n return self.t2 - self.t1", "def tunnel2_bgp_holdtime(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"tunnel2_bgp_holdtime\")", "def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):\n return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def net_delay_data(self):\n return self._net_delay_data", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_sample_delay(self, which)", "def remaining_ms():", "def _computModelDelay(self):\n\n # 'delay' and 'sleepModelRange' are mutually exclusive\n if self._params['delay'] is not None \\\n and self._params['sleepModelRange'] is not None:\n raise RuntimeError(\"Only one of 'delay' or \"\n \"'sleepModelRange' may be specified\")\n\n # Get the sleepModel range\n if self._sleepModelRange is not None:\n range, delay = self._sleepModelRange.split(':')\n delay = float(delay)\n range = map(int, range.split(','))\n modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)\n modelIDs.sort()\n\n range[1] = min(range[1], len(modelIDs))\n\n # If the model is in range, add the delay\n if self._modelID in modelIDs[range[0]:range[1]]:\n self._delay = delay\n else:\n self._delay = self._params['delay']", "def getRefDelay (self, time):\n return self._response.getRefDelay(time)", "def delay(self, distance, seconds):\n delay = distance/seconds\n return delay", "def getProcedureDelays(self, *values):", "def delay(ms: int, /) -> None:", "def exchange_timestamps_thread(other_uuid: str, other_ip: str, other_tcp_port: int):\r\n print_yellow(f\"ATTEMPTING TO CONNECT TO {other_uuid}\")\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect((other_ip, int(other_tcp_port)))\r\n other_time = sock.recv(4096)\r\n other_time = struct.unpack(\"!f\", other_time)\r\n print(\"*\" * 50)\r\n my_time = datetime.datetime.utcnow().timestamp()\r\n delay = my_time - other_time[0]\r\n neighbor = NeighborInfo(delay, my_time, other_ip,\r\n other_tcp_port, broadcast_count)\r\n print_red(neighbor_information)\r\n #neighbor_information.update({other_uuid: neighbor})\r\n neighbor_information[other_uuid] = neighbor\r\n print_red(neighbor_information)", "def delay(self, delay=None):\n if delay is None:\n return self._delayvalue\n self._delayvalue = int(delay)", "def 
test_delay():\n time1 = time.time()\n res = delay(1)(_dummy_func)(2)\n time2 = time.time()\n assert res == (2, 4)\n assert time2 - time1 >= 1", "def example2():\n arrive_time=rand_arr_time.rand_arr_time(6,100000,1000) # Get packet arrive time, with option 2, 100000 packets, expected in 1000 seconds.\n return arrive_time", "def rcvStrTimeOut(self, num=1, tou=0.1):\r\n\t\treturn self.rcvDataTimeOut(num, tou)", "def TxDelay(self):\n\t\treturn self._get_attribute('txDelay')", "def _delay(self, n=None):", "def getDelaySecs(host, forceDelaySecs):\n if globalForceDelay != None:\n logging.log(5, 'delay time is set globally to %d seconds' % globalForceDelay)\n return globalForceDelay\n elif forceDelaySecs != None:\n logging.log(5, 'delay time is set for this download to %d seconds' % forceDelaySecs)\n return forceDelaySecs\n else:\n logging.debug('Looking up delay time for host %s' % host)\n if host in crawlDelays:\n delaySecs = crawlDelays.get(host, defaultDelay)\n logging.info('Delay time for host %s set to %d seconds' % (host, delaySecs))\n return delaySecs\n logging.debug('Delay time for host %s not known' % host)\n return defaultDelay", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_sample_delay(self, which)", "def sample_delay(self, which):\n return _spacegrant_swig.udp_debug_sptr_sample_delay(self, which)", "def disconnect_delay(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disconnect_delay\")", "def disconnect_delay(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disconnect_delay\")", "def _calculate_periodic_loop_sleep_time(self, time_for_process):\n if time_for_process > self.min_time_per_process_loop:\n sleep_time = 0\n else:\n sleep_time = self.min_time_per_process_loop - time_for_process\n\n return sleep_time", "def get_c2c_time(job, pulse1, pulse2):\n analog_wf = job.simulated_analog_waveforms()\n element1 = pulse1[0]\n pulse_nb1 = pulse1[1]\n element2 = pulse2[0]\n pulse_nb2 = pulse2[1]\n\n time2 = (\n analog_wf[\"elements\"][element2][pulse_nb2][\"timestamp\"]\n + analog_wf[\"elements\"][element2][pulse_nb2][\"duration\"] / 2\n )\n time1 = (\n analog_wf[\"elements\"][element1][pulse_nb1][\"timestamp\"]\n + analog_wf[\"elements\"][element1][pulse_nb1][\"duration\"] / 2\n )\n\n return time2 - time1", "def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "def StartDelay(self):\r\n\t\treturn self._get_attribute('startDelay')", "def delay(dt):\n return dt.total_seconds()", "def _get_sleep_time(self, start_date, end_date):\n if start_date.minute == end_date.minute:\n return 60 - end_date.second - (1 - start_date.microsecond / 1000000)\n\n return 0", "def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute", "async def tormentdelay(self, ctx, delay : int = None):\r\n\t\t\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\r\n\t\t# Only allow owner to change server stats\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\r\n\t\tif delay == None:\r\n\t\t\tif self.waitBetween == 1:\r\n\t\t\t\tawait ctx.message.author.send('Current torment delay is *1 second.*')\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.message.author.send('Current torment delay is *{} 
seconds.*'.format(self.waitBetween))\r\n\t\t\treturn\r\n\t\t\r\n\t\ttry:\r\n\t\t\tdelay = int(delay)\r\n\t\texcept Exception:\r\n\t\t\tawait ctx.message.author.send('Delay must be an int.')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif delay < 1:\r\n\t\t\tawait ctx.message.author.send('Delay must be at least *1 second*.')\r\n\t\t\treturn\r\n\t\t\r\n\t\tself.waitBetween = delay\r\n\t\tif self.waitBetween == 1:\r\n\t\t\tawait ctx.message.author.send('Current torment delay is now *1 second.*')\r\n\t\telse:\r\n\t\t\tawait ctx.message.author.send('Current torment delay is now *{} seconds.*'.format(self.waitBetween))", "def delay(self):\n return self._delay", "def LingerTime(self) -> int:", "def maximum_communication_delay(self):\n\n segment_pairs = ((src, dst) for src in self.sim.segments for dst in\n self.sim.segments if src != dst)\n\n delays = []\n for src, dst in segment_pairs:\n delay = self.communication_delay(src, dst)\n delays.append(delay)\n\n delays = np.array(delays)\n max_delay = np.max(delays)\n # max_delay *= pq.second\n\n return max_delay", "def get_retry_delay(self, last_delay):\n return last_delay * 2", "def ppid(self):", "def tunnel2_dpd_timeout_seconds(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"tunnel2_dpd_timeout_seconds\")", "def get_delay_diff(self):\n if type(self.apply_delay) is not int:\n err = 'ReplicationDelay().get_delay_diff(): '\\\n 'the self.apply_dilay should be an integer'\n raise ValueError(err)\n\n if self.current_time_lag_min:\n self.delay_diff = self.apply_delay - self.current_time_lag_min\n return self.delay_diff\n else:\n # Delay setting not found in a recovery.conf,\n # or a database is not in recovery mode:\n return 0", "def evaluate_time(self, pid, edge):\n return self.get_process_speed(pid) * edge.get_time()", "def print_host_time_diff(host_name, num_requests, last, first):\n last = datetime.datetime.fromtimestamp(float(last))\n if num_requests > 1:\n # calclate the time difference between the first and last request from this host\n first = datetime.datetime.fromtimestamp(float(first))\n time_diff = last - first\n print(\"{0}\\t{1}\".format(host_name, time_diff))\n else:\n # for 1 request just print the timestamp\n print(\"{0}\\t{1}\".format(host_name, last))", "def _predict_net_delay(self, env_inputs):\n for app in self.system.apps:\n for src_node in self.system.nodes:\n for dst_node in self.system.nodes:\n data = self._net_delay_data[app.id][src_node.id][dst_node.id]\n for step in range(len(env_inputs)):\n data_index = step + self._current_index + 1\n value = None\n if data_index < len(data):\n value = data[data_index]\n else:\n value = self.environment_input.net_delay[app.id][src_node.id][dst_node.id]\n env_inputs[step].net_delay[app.id][src_node.id][dst_node.id] = value\n env_inputs[step].net_path = self.environment_input.net_path\n return env_inputs", "def getDelay(self, channel, unitCode=0):\n resp = self.XAPCommand('DELAY', channel, unitCode=unitCode)\n return float(resp)", "def distTip(self,id1,id2):\n #productive #math\n profprint()\n node = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode'+str(id1))\n polydata=node.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode'+str(id2))\n polydata2=node2.GetPolyData()\n p,pbis=[0,0,0],[0,0,0]\n p2=[0,0,0]\n p2bis=[0,0,0]\n axialDistance=[]\n for i in range(100):\n polydata.GetPoint(i,p)\n polydata.GetPoint(2499-i,pbis)\n if pbis[2]>p[2]:\n p=pbis\n polydata2.GetPoint(2499-i,p2)\n polydata2.GetPoint(i,p2bis)\n if p2bis[2]>p2[2]:\n p2=p2bis\n axialDistance.append((( 
p2[0]-p[0] )**2 + ( p2[1]-p[1] )**2 + (p2[2]-p[2])**2)**0.5)\n return min(axialDistance)", "def distBase(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode' + str(id1))\r\n polydata = node.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID('vtkMRMLModelNode' + str(id2))\r\n polydata2 = node2.GetPolyData()\r\n p, pbis = [0, 0, 0], [0, 0, 0]\r\n p2 = [0, 0, 0]\r\n p2bis = [0, 0, 0]\r\n baseDistance = []\r\n for i in range(100):\r\n polydata.GetPoint(i, p)\r\n polydata.GetPoint(polydata.GetNumberOfPoints()-1 - i, pbis)\r\n if pbis[2] > p[2]:\r\n pbis = p\r\n polydata2.GetPoint(i, p2)\r\n polydata2.GetPoint(polydata2.GetNumberOfPoints()-1 - i, p2bis)\r\n if p2bis[2] > p2[2]:\r\n p2bis = p2\r\n baseDistance.append(((p2bis[0] - pbis[0]) ** 2 + (p2bis[1] - pbis[1]) ** 2 + (p2bis[2] - pbis[2]) ** 2) ** 0.5)\r\n return min(baseDistance)", "def delay(self):\n # well, so this is really bad practice\n # but since the nature of this app \n # I have to make assumptions around time..\n is_delayed_trader = self.delayed\n now = time.time()\n if not is_delayed_trader or self.message_arrival_estimate is None:\n self.message_arrival_estimate = now + self.default_delay\n delay = self.default_delay\n else:\n current_arrival_estimate = now + self.__delay\n if self.message_arrival_estimate > current_arrival_estimate:\n diff = self.message_arrival_estimate - current_arrival_estimate\n delay = diff + self.__delay\n self.message_arrival_estimate = now + delay \n else: \n self.message_arrival_estimate = current_arrival_estimate\n delay = self.__delay\n delay = round(delay, 4)\n log.debug('trader %s: message delay %s.' % (self.tag, delay))\n return delay", "def deepsleep(time_ms: int = None) -> None:" ]
[ "0.60461235", "0.59016067", "0.586062", "0.56952107", "0.55916893", "0.55790615", "0.55780035", "0.5559182", "0.554915", "0.5509043", "0.5487633", "0.5454966", "0.53850937", "0.53481144", "0.5299796", "0.5299796", "0.52713627", "0.5267619", "0.52288824", "0.52207476", "0.52097857", "0.5200298", "0.5195831", "0.51762074", "0.5164874", "0.5153719", "0.51536494", "0.51447356", "0.51416755", "0.5138872", "0.5138357", "0.51054543", "0.50843865", "0.50715417", "0.5068478", "0.5054189", "0.50407165", "0.503278", "0.5022971", "0.50173026", "0.5015625", "0.5012718", "0.5008382", "0.5004949", "0.50023407", "0.5000595", "0.49764207", "0.49710825", "0.49657908", "0.49605805", "0.49499765", "0.49379364", "0.4932052", "0.49192366", "0.49176392", "0.49092296", "0.49092296", "0.49071422", "0.4906497", "0.4904241", "0.4898088", "0.48956633", "0.48921385", "0.48856595", "0.48847416", "0.4880687", "0.4878002", "0.4877117", "0.48533267", "0.48499545", "0.48260877", "0.4816827", "0.48023564", "0.47990468", "0.4794476", "0.47887397", "0.47887397", "0.47859326", "0.4784951", "0.47848845", "0.47820637", "0.4777506", "0.4774312", "0.47742495", "0.47726136", "0.47662762", "0.4746256", "0.4739032", "0.47264242", "0.47250763", "0.4724692", "0.47239992", "0.47181475", "0.47176167", "0.4710689", "0.47011787", "0.4683358", "0.4682404", "0.46757954", "0.46726617" ]
0.75615823
0
Adds a meta file to database.
def add_meta_f_to_db(meta_f, p, dbi): rism_attributes = sd.Water3DRISM.__dict__.keys() extra_attributes = sd.Water3DRISMExtra.__dict__.keys() with open(os.path.join(p, meta_f), 'rb') as f: txt = f.readlines() inchi_line = txt[0] if inchi_line.startswith('InChI'): print inchi_line _, inchi = inchi_line.split(', ') inchi = inchi.strip() dbmol = dbi.get_molecule(inchi) rism = sd.Water3DRISM() rism_extra = sd.Water3DRISMExtra() else: raise ValueError('dbf file must start with InChI, <inchi code>') for line in txt[1:]: if ',' in line: line_l = line.split(', ') name = line_l[0].strip() values = map(lambda x: x.strip(), line_l[1:]) if len(line_l) == 2: if name in rism_attributes: rism.__setattr__(name, values[0]) elif name in extra_attributes: if name == 'UCorrMult': rism_extra.__setattr__(name, values[0]) else: with open(os.path.join(p, values[0]), 'rb') as f: value = f.read() rism_extra.__setattr__(name, value) elif len(line_l) == 4: rism_therm = sd.ThermodynamicOutput(Property=name) if values[0] != '-': rism_therm.TotalValue = values[0] if values[1] != '-': rism_therm.OContrib = values[1] if values[2] != '-': rism_therm.HContrib = values[2] rism.ThermOut.append(rism_therm) else: print 'Unknown attribute: {}'.format(name) rism.Extra = rism_extra dbmol.RISMCalcs.append(rism) dbi.add_molecule(dbmol) print 'Added molecule {}'.format(dbmol)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __appendMetaData(self, filename):\n metadata = {'Model': 'LFM',\n 'Source': filename,\n 'Date processed': datetime.datetime.now(),\n 'Start date': self.startDate\n }\n \n self.data.append(key='meta',\n name='Metadata for LFM Solar Wind file',\n units='n/a',\n data=metadata)", "def add_metadata(self, metadata: dict) -> None:", "def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })", "def add_file_metadata(self):\n metadata = self.__file.require_group(METADATA)\n self.__write_value(metadata, DATE_CREATED, date.today().strftime(\"%Y-%m-%d\"))\n self.__write_value(metadata, SDK_VERSION, __version__)", "def _add_meta(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _add_new_struct_to_existing_database(self, db, filename):\n\n pass", "def writeMeta(outDir, metaData, fulltextData):\n filename = join(outDir, 'articleMeta.tab')\n logging.debug('Appending metadata to %s' % filename)\n minId = pubConf.identifierStart['crawler']\n metaData['articleId'] = str(minId + int(metaData['pmid']))\n if 'main.html' in metaData:\n metaData['fulltextUrl'] = metaData['main.html']['url']\n elif 'landingUrl' in metaData:\n metaData['fulltextUrl'] = metaData['landingUrl']\n if not isfile(filename):\n codecs.open(filename, 'w', encoding='utf8').write(u'\\t'.join(metaHeaders) + '\\n')\n maxCommon.appendTsvDict(filename, metaData, metaHeaders)\n row = []\n for h in metaHeaders:\n row.append(metaData.get(h, ''))\n\n dbFname = join(outDir, 'articles.db')\n con, cur = maxTables.openSqliteCreateTable(dbFname, 'articles', metaHeaders, idxFields=['pmid', 'pmcId', 'doi'], intFields=['pmid', 'articleId', 'pmcId'], primKey='pmid', retries=100)\n writeOk = False\n tryCount = 100\n logging.log(5, '%s' % row)\n while not writeOk and tryCount > 0:\n try:\n try:\n maxTables.insertSqliteRow(cur, con, 'articles', metaHeaders, row)\n except sqlite3.IntegrityError:\n logging.warn('Already present in meta info db')\n\n writeOk = True\n except sqlite3.OperationalError:\n logging.info('sqlite db is locked, waiting for 60 secs')\n time.sleep(60)\n tryCount -= 1\n\n if not writeOk:\n raise Exception('Could not write to sqlite db')", "def add_meta(self, name, value):\n for mt in self.metas:\n if mt.name == name:\n mt.value = value\n return self\n self.metas.append(MetaDef(name, value))\n return self", "def add_meta(self, post, *args, **kwargs):\n\t\tsuper(Command, self).add_meta(post, *args, **kwargs)\n\t\tpost.gen_description = False\n\t\tpost.description = description_from_content(post)\n\t\tpost.save()", "def item_add(self, item, filename):\n\t\tif not 'meta' in item:\n\t\t\titem['meta'] = {}\n\t\titem['meta']['filename'] = filename\n\t\t\n\t\t# Create directory if it does not already exist\t\t\t\t\n\t\tdirname = os.path.dirname(filename)\n\t\tif not os.path.isdir(dirname):\n\t\t\tos.makedirs(dirname)\n\n\t\tbuffer = self.print_conf( item )\n\t\tfile = open(filename,'a')\n\t\tfile.write( buffer )\n\t\tfile.close()\n\t\treturn True", "def add_android_metadata(conn, cur):\n\n print 'Adding Android metadata...',\n \n cur.execute('INSERT INTO android_metadata VALUES (\"pl_PL\")')\n \n conn.commit()\n \n print 'done.'", "def add(self, file_infos):\n 
self._check_writable_()\n \n for file_info in file_infos:\n #columns = mesh_id, value, date_data, lon, lat, date_added_to_db, sv_name, info\n #add file to db with status adding\n file_info['date_added_to_db'] = datetime.now()\n list_write = [file_info[el] if el in file_info else None for el in self._columns.keys()]\n #check for proper inputs\n self.check_column_values(list_write)\n \n #add to db\n self._cursor.execute('INSERT INTO FILEINFO VALUES (%s)'%(','.join(['?' for el in self._columns.keys()])), tuple(self.convert_column_dates2str(list_write)))\n self._conn.commit()", "async def add_metadata(dbcon: DBConnection, object_type: str, object_id: int, metadict: Dict[str, str]):\n\n async def _run(cur: Cursor) -> None:\n q = \"\"\"insert into object_metadata (object_type, object_id, `key`, value) values (%s, %s, %s, %s)\"\"\"\n for key, value in metadict.items():\n q_args = (object_type, object_id, str(key), str(value))\n await cur.execute(q, q_args)\n\n await dbcon.transact(_run)", "def addFile(self, path):\n self._model.insertFile(path)", "def save_meta(self, meta_file):\n pickle.dump(self._w2i, open(meta_file, \"wb\"))", "def save_meta(self):\n meta = self.serializer.dumps(self.meta)\n self.connection.hset(self.key, 'meta', meta)", "def add_metadata(product_dir, metadata_file):\n\n with open(metadata_file) as f:\n metadata = json.load(f)\n\n # get datasets config\n uu = UrlUtils()\n dsets_file = uu.datasets_cfg\n r = Recognizer(dsets_file, product_dir, product_dir, 'v0.1')\n\n # add\n metadata.setdefault('dataset_type', r.getType())\n metadata.setdefault('dataset_level', r.getLevel())\n\n # overwrite metadata json file\n with open(metadata_file, 'w') as f:\n json.dump(metadata, f, indent=2, sort_keys=True)", "def load_meta(self, meta_file):\n self._w2i = pickle.load(open(meta_file, \"rb\"))", "def add_meta(self, **info):\n\tnew_meta = dict(self.meta)\n\tnew_meta.update(info)\n\treturn type(self)(self.stat, self.value, **new_meta)", "def _append_metadata_to_database(self, metadata):\n df = pd.DataFrame([metadata])\n if os.path.exists(self._metadata_file):\n session_df = self.get_session_metadata()\n df = pd.concat([session_df, df], axis=0, ignore_index=True)\n\n with open(self._metadata_file, 'w') as f:\n df.to_csv(f, index=False)", "def add_metadata (self, name, value):\n self.metadata[name] = value\n return self", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def main(args):\n metafiles = []\n verbose = args.verbose\n\n if (args.metalist is not None):\n for listfile in args.metalist:\n metafiles.extend(addmeta.list_from_file(listfile))\n\n if (args.metafiles is not None):\n metafiles.extend(args.metafiles)\n\n if verbose: print(\"metafiles: \",\" \".join([str(f) for f in metafiles]))\n\n addmeta.find_and_add_meta(args.files, metafiles)", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def add_source_metadata(self, src_name: SourceName, metadata: SourceMeta) -> None:\n metadata_item = metadata.dict()\n metadata_item[\"src_name\"] = src_name.value\n try:\n 
self.metadata.put_item(Item=metadata_item)\n except ClientError as e:\n raise DatabaseWriteException(e)", "def addMetaMolecule (self,metaMolecule):\r\n self.metaMolecule = metaMolecule", "def add_meta(self, name, content, scheme=None):\n meta = ET.SubElement(self.head, 'meta', name=name, content=content)\n if scheme is not None:\n meta.set(\"scheme\", scheme)", "def add(self, filename, *args):\n return self.cmd('add', filename, *args)", "def registerDirMetaData(self, lfn, meta_dict):\n filename = os.path.basename(lfn)\n lpn = lfn.split(filename)[0].rstrip('/')\n res = self.fc.setMetadata(lpn, meta_dict)\n if not res['OK']:\n gLogger.error('Setting Meta Data from file (%s) on dir (%s) failed with message (%s)' %(filename, lpn, res['Message']))\n else:\n gLogger.info('Setting meta data on lpn (%s) succeeded' %lpn)\n ### res is either S_OK or S_ERROR\n return res", "def add_to_db(self):\r\n for filename in self.new_data_files:\r\n unique_name = form_unique_name(filename)\r\n extracted_date = extract_date(filename)\r\n if extracted_date is not None:\r\n # If we can parse the date from the filename we parse the file\r\n file_ = File(filename, unique_name, extracted_date)\r\n content = file_.get_content()\r\n for element in content:\r\n # If each of the spectra in the file has data, we\r\n # add it to the data base\r\n if element[1] is not None:\r\n self.add_to_db_single(element)\r\n status_msg('Elements of file {0} added to db'.format(\r\n unique_name), True)\r\n else:\r\n status_msg('File {0} not added, unknown filename format'.\r\n format(unique_name), False)", "def _meta_json_to_database(self):\n\n sqlalchemy_metadata = MetaData() # this is unrelated to our meta.json\n meta_table = Table('meta', sqlalchemy_metadata,\n Column('meta', String))\n\n sqlalchemy_metadata.create_all(self.engine)\n json_string = json.dumps(self.meta)\n ins = meta_table.insert().values(meta=json_string)\n conn = self.engine.connect()\n conn.execute(\"DELETE FROM meta;\")\n conn.execute(ins)", "def add_gallery(galleryname, gpath, description, tags, zipfile, \n users_r, users_w, groups_r, groups_w, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n sql_cmd = \"\"\"INSERT INTO {t}\n (id, gallery_name, gpath, description, tags, \n time_added, zipfile, users_r, users_w, groups_r, groups_w)\n VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\".format(t=safe(table))\n c.execute(sql_cmd, (galleryname, gpath, description, tags,\n time.asctime(time.localtime(time.time())),\n zipfile, users_r, users_w, groups_r, groups_w,))\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"Error when trying to add gallery in table \" + table + \\\n \" in \" + db_file)\n print(e)\n return False\n else:\n return True", "def persist(self) -> None:\n logger.info('Generating or Updating meta data file {}'.format(self.file_path))\n with open(self.file_path, 'w', encoding='utf-8') as meta_file:\n meta_file.write(json.dumps(self, default=lambda value: value.__dict__))", "def add_file(self, path):\n pass", "def add(self, path, title, author):\n path = path.decode('utf8')\n title = title.decode('utf8')\n author = author.decode('utf8')\n cursor = self._dbcon.cursor()\n filename = os.path.basename(path)\n dirname = os.path.dirname(path)\n t = (title, author, filename, dirname)\n sql = u\"insert into books values (?, ?, ?, ?)\"\n cursor.execute(sql, t)\n self._dbcon.commit()\n cursor.close()", "def add_metadata(graph, metadata_fp, metadata_col):\n metadata = pd.read_csv(metadata_fp, sep='\\t')\n\n if 'node' not in 
metadata.columns:\n raise ValueError(f\"Must be a column called 'node' in metadata with \"\n f\"node labels: {metadata_fp}\")\n\n if metadata_col not in metadata.columns:\n raise ValueError(f\"Supplied metadata col ({metadata_col} not found in \"\n f\" {metadata_fp}\")\n\n if set(graph.nodes) != set(metadata['node'].values):\n raise ValueError(f\"Metadata node column doesn't contain same values \"\n \"as node names in the graph\")\n\n metadata_labels = metadata.set_index('node')[metadata_col].to_dict()\n\n nx.set_node_attributes(graph, metadata_labels,\n metadata_col.replace(' ', '_'))\n\n return graph", "def meta(self, meta):\n\n self._meta = meta", "def meta(self, meta):\n\n self._meta = meta", "def add_plantcv_metadata(session, url, fileid, metadata):\n # print(json.dumps(metadata))\n r = session.post(posixpath.join(url, \"api/files\", fileid, \"metadata\"),\n headers={\"Content-Type\": \"application/json\"}, data=json.dumps(metadata))\n\n # Was the upload successful?\n if r.status_code != 200:\n raise StandardError(\"Uploading metadata failed: Return value = {0}\".format(r.status_code))", "def insert_file(self, filename, value_dict):\n try:\n hash = self.tree.insert(filename, self)\n except ValueError:\n raise StashError('That file is already stored in the stash!')\n query = \"\"\"insert into files (hash, filename, timestamp)\n values (?, ?, datetime('now'))\"\"\"\n self.connection.execute(query,(hash, os.path.basename(filename)))\n self.connection.commit()\n if value_dict:\n metadata = {'hash': hash}\n metadata.update(value_dict)\n self.set_fields(metadata)", "def gen_meta(self, filename):\n nf_meta = {}\n nf_meta['filename'] = filename\n nf_meta['deleted'] = 0\n\n # http://stackoverflow.com/a/5297483\n nf_meta['key'] = hashlib.md5(str(filename).encode('utf-8')).hexdigest()\n self.log.debug(\"Note File Meta Key: %s\", nf_meta['key'])\n\n path = self.config.get_config('cfg_nt_path')\n\n # WARNING THIS IS PLATFORM SPECIFIC\n nf_meta['createdate'] = os.stat(path + \"/\" + filename).st_birthtime\n self.log.debug(\"Note File Meta Created: %s [%s]\", nf_meta['createdate'], time.ctime(nf_meta['createdate']))\n\n nf_meta['modifydate'] = os.stat(path + \"/\" + filename).st_mtime\n self.log.debug(\"Note File Meta Modified: %s [%s]\", nf_meta['modifydate'], time.ctime(nf_meta['modifydate']))\n\n return nf_meta", "def sync_metadata_to_db(self):\n metadata_file_path = os.path.join(\n self.plugin_path, self.plugin_metadata)\n\n metadata = self._load_config(metadata_file_path) or {}\n Plugin.update(self.plugin, metadata)", "def add_data(self, file_name: str, fabricated_count: dict) -> None:\n\n assert file_name not in self._meta_data_dict, \"Error, filename has already been used.\"\n\n self._meta_data_dict[file_name] = fabricated_count", "def load_meta(self, attrs, meta_path=None):\n ts = time.time()\n logger.info('Rechunking meta')\n meta = None\n if meta_path is not None:\n if meta_path.endswith('.csv'):\n meta = pd.read_csv(meta_path)\n meta = to_records_array(meta)\n elif meta_path.endswith('.npy'):\n meta = np.load(meta_path)\n\n if meta is None:\n with h5py.File(self._src_path, 'r') as f:\n meta = f['meta'][...]\n\n if isinstance(attrs['chunks'], int):\n attrs['chunks'] = (attrs['chunks'], )\n\n attrs['dtype'] = meta.dtype\n ds = self.init_dset('meta', meta.shape, attrs)\n ds[...] 
= meta\n logger.info('- meta transfered')\n tt = (time.time() - ts) / 60\n logger.debug('\\t- {:.2f} minutes'.format(tt))", "def set_metadata(self, key, value):\n if '::' not in key:\n raise ValueError('Invalid key %s; must be prefixed with \"appname::\"' % key)\n\n self._db_query('DELETE FROM meta WHERE attr=?', (key,))\n self._db_query('INSERT INTO meta VALUES (?, ?)', (key, value))\n self._set_dirty()", "def addSchemaFile(self, newSchemaFile):\n\t\tself.schemaFile.append(newSchemaFile)", "def save_meta_file(self, new_dict):\n try:\n self.logger.debug('Save new metadata file %s.', self.meta_file_path)\n if not self._meta_dict:\n self._meta_dict = {}\n\n self._meta_dict[\"schema\"] = \"http://telegram-messages-dump/schema/v/1\"\n\n if DumpMetadata.CHAT_NAME in new_dict:\n self._meta_dict[DumpMetadata.CHAT_NAME] = new_dict[DumpMetadata.CHAT_NAME]\n if DumpMetadata.LAST_MESSAGE_ID in new_dict:\n self._meta_dict[DumpMetadata.LAST_MESSAGE_ID] =\\\n new_dict[DumpMetadata.LAST_MESSAGE_ID]\n if DumpMetadata.EXPORTER in new_dict:\n self._meta_dict[DumpMetadata.EXPORTER] = new_dict[DumpMetadata.EXPORTER]\n\n self.logger.info('Writing a new metadata file.')\n with open(self.meta_file_path, 'w') as mf:\n json.dump(self._meta_dict, mf, indent=4, sort_keys=False)\n except OSError as ex:\n raise MetadataError(\n 'Failed to write the metadata file. {}'.format(ex.strerror))", "def add_metadata(self, key, value):\n self._h5.attrs[key] = value", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def write_metadata(dir_path, fs, *metas, global_metadata=True):\n assert metas\n md = metas[0]\n with fs.open(\"/\".join([dir_path, \"_common_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)\n if global_metadata:\n for meta in metas[1:]:\n md.append_row_groups(meta)\n with fs.open(\"/\".join([dir_path, \"_metadata\"]), \"wb\") as fil:\n md.write_metadata_file(fil)", "def append(self, filename):\n\n self.db.single_insert_camera(filename)\n self.db.batch_insert_camera(filename)", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n\tbody = fileHandle.read()\n if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def addItem(self, object, file):\n counter = len(self.database) + 1\n self.database.append(object)\n with open (file, \"a+\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n writer.writerow(object)\n return \"Dodano do bazy\"", "def add_field(self, field_name, field_type):\n field_name = field_name.replace('\"','')\n if field_type == 'keyword':\n query = \"\"\"insert or ignore into keywords\n (_keyword) values (\"%s\")\"\"\" % field_name\n else:\n query = 'alter table files add column \"%s\" %s' % (\n field_name, field_type)\n 
self.connection.execute(query)\n self.connection.commit()\n self.init_fields()", "def insertfile(self, datas):\n query = \"\"\"INSERT INTO caro_song (score, filename, artist, album, title, genre, played, uniq, global_score, family) VALUES (0, %s, %s, %s, %s, %s, 0, %s, 0, 0);\"\"\"\n cur = self.conn.cursor()\n try:\n cur.execute(query, (datas[0],\n datas[1],\n datas[2],\n datas[3],\n datas[4],\n datas[5]\n ))\n except KeyError:\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, 'ERROR 02', now());\"\"\"\n cur.execute(query, (datas[0],))", "def send_file_meta(conn, filename):\n if not os.path.isfile(filename):\n pkt = CCPacket(CCHeader(MSG['ERR_NOFILE']))\n conn.send(pkt.packed())\n else:\n meta = MetaInfo(filename)\n pkt = CCPacket(CCHeader(MSG['FILEMETA']), meta)\n conn.send(pkt.packed())", "def update_metadata(self, file_id, metadata):\n pass", "def addDoc(self, doc, path):\n query = \"INSERT OR REPLACE INTO docs (local_path, resource_id, etag, title) VALUES (?, ?, ?, ?)\"\n self.db.execute(query, (path, doc.resource_id.text, doc.etag, doc.title.text))", "def update_meta(self, db_name, collection_name, is_finished):\n import time\n from ir_config import IRConfig\n meta_collection = self.__get_meta_collection(db_name)\n res = self.__find_collection_in_meta(db_name, collection_name)\n if res.count() > 0:\n meta_collection.update({self.__meta_key_name : collection_name},\n {'$set' : {self.__meta_lastmodified_name : int(time.time()),\n self.__meta_success_name : is_finished}}) \n else:\n meta_collection.insert({self.__meta_key_name : collection_name,\n self.__meta_lastmodified_name : int(time.time()),\n self.__meta_success_name : is_finished})", "def addMetaDataToItem(self, itemId, metadata) :\n path = 'item/' + itemId + '/metadata'\n #return self.sendRestRequest('PUT', path, params={'token': self.token}, data=metadata)\n obj = requests.put(self.urlBase + path, params={'token': self.token}, data=json.dumps(metadata))\n return obj.json()", "def add_meta(self, name, qtype, label, categories=None, items=None,\n text_key=None, replace=True):\n # verify name\n self._in_blacklist(name)\n make_array_mask = True if items else False\n self._verify_variable_meta_not_exist(name, make_array_mask)\n\n # verify qtype\n valid = ['delimited set', 'single', 'float', 'int', 'date', 'string']\n categorical = ['delimited set', 'single']\n numerical = ['int', 'float']\n if not qtype in valid:\n raise NotImplementedError('Type {} data unsupported'.format(qtype))\n elif qtype in categorical and not categories:\n val_err = \"Must provide 'categories' when requesting data of type {}.\"\n raise ValueError(val_err.format(qtype))\n elif qtype == 'delimited set' and len(categories) == 1:\n qtype = 'single'\n print('Only one category is given, qtype is switched to single.')\n elif qtype in numerical and categories:\n val_err = \"Numerical data of type {} does not accept 'categories'.\"\n raise ValueError(val_err.format(qtype))\n\n if not text_key: text_key = self.text_key\n if make_array_mask:\n self._add_array(name, qtype, label, items, categories, text_key)\n return None\n\n new_meta = {'text': {text_key: label},\n 'type': qtype,\n 'name': name,\n 'parent': {},\n 'properties': {'created': True}}\n if categories:\n new_meta['values'] = self._make_values_list(categories, text_key)\n self._meta['columns'][name] = new_meta\n datafile_setname = 'columns@{}'.format(name)\n if datafile_setname not in self._meta['sets']['data file']['items']:\n self._meta['sets']['data 
file']['items'].append(datafile_setname)\n if replace or not name in self._data.columns:\n self._data[name] = '' if qtype == 'delimited set' else np.NaN\n return None", "def add_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n print filename\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = set(metadata.get(\"tags\", []))\n tags.add(tag_name)\n metadata[\"tags\"] = list(tags)\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"added\", 200", "def insert(self, media):\n insert_query = \"\"\"INSERT INTO %s VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(insert_query, media.totuple())\n self.connection.commit()", "def add(self, content):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n with open(self.location, 'a') as f:\n f.write(content)", "def add_file(self, field_name, file_name, file_content, mimetype=None):\n if mimetype is None:\n mimetype = mimetypes.guess_type(file_name)[0] or 'application/octet-stream'\n self.files.append((field_name, file_name, mimetype, file_content))\n return", "def add_file(self, path):\n self.git_repo.index.add([path])", "def meta(self, meta):\n if not isinstance(meta, GiftiMetaData):\n raise TypeError(\"Not a valid GiftiMetaData instance\")\n self._meta = meta", "def add_metadata(self, metdatadata_list):\n self._metadata.append((metdatadata_list, ))", "def add_metadata(self, metdatadata_list):\n self._metadata.append((metdatadata_list, ))", "def savemetadata(self, newmeta):\n with open(self.outputMetadata, 'w') as f:\n json.dump(newmeta, f)", "def add_object(_object):\n print('add_object: ' + str(_object))\n try_insert_or_update(\n models.objects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n project_id=_object['project_id'], filename=_object['filename'])])", "def addFromFile(self, metadataProfileId, objectType, objectId, xmlFile):\n\n kparams = KalturaParams()\n kparams.addIntIfDefined(\"metadataProfileId\", metadataProfileId);\n kparams.addStringIfDefined(\"objectType\", objectType)\n kparams.addStringIfDefined(\"objectId\", objectId)\n kfiles = {\"xmlFile\": xmlFile}\n self.client.queueServiceActionCall(\"metadata_metadata\", \"addFromFile\", \"KalturaMetadata\", kparams, kfiles)\n if self.client.isMultiRequest():\n return self.client.getMultiRequestResult()\n resultNode = self.client.doQueue()\n return KalturaObjectFactory.create(resultNode, 'KalturaMetadata')", "def add_tag_meta(self, new_tag_meta: TagMeta) -> ProjectMeta:\n return self.add_tag_metas([new_tag_meta])", "def append_meta_data(content, guid):\n timestamp = str(datetime.now())\n\n meta = {\"Meta\": {\"TimeStamp\": timestamp, \"guid\": str(guid)}}\n\n content.update(meta)\n\n return content", "def add_movie_to_db(self):\n MOVIE.insert_one({\n \"title\": self.title,\n \"year\": self.year\n })", "def load_meta(fname, data_id=''):\n # TODO: expand functionality?\n with open(fname+data_id+'_meta.pkl', 'rb') as f:\n meta = pickle.load(f)\n return meta", "def newsong(self, filename):\n datas = None\n try:\n unicode(filename)\n except UnicodeDecodeError:\n return\n\n cur = self.conn.cursor()\n\n try:\n datas = mutagen.File(filename, easy=True)\n 
except:\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, 'ERROR 01', now());\"\"\"\n cur.execute(query, (filename,))\n\n\n if datas is not None:\n artist = None\n album = None\n title = None\n genre = None\n\n try:\n artist = datas['artist'][0]\n album = datas['album'][0]\n title = datas['title'][0]\n genre = datas['genre'][0]\n except KeyError as e:\n msg = str(sys.exc_type), \":\", \"%s is not in the list.\" % sys.exc_value\n\n query = \"\"\"INSERT INTO caro_logs (filename, message, date_import) VALUES (%s, %s, now());\"\"\"\n cur.execute(query, (filename, msg, ))\n\n if artist and album and genre and title:\n fsig = hashfile(filename)\n chk = self.checkfile(fsig)\n if chk == 0:\n self.insertfile([filename, artist, album, title, genre, fsig])\n else:\n self.update_path(filename, fsig)\n else:\n print \"Missing tag\"\n self.conn.commit()", "def _store_meta(storage_args):\n log.debug(\"StorageObj: storing media %s\", storage_args)\n try:\n config.session.execute(StorageNumpy._prepared_store_meta,\n [storage_args.storage_id, storage_args.class_name,\n storage_args.name])\n except Exception as ex:\n log.warn(\"Error creating the StorageNumpy metadata with args: %s\" % str(storage_args))\n raise ex", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n body = fileHandle.read()\n if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def add_metadata(self, column_name, data_type=None, version=None, description=None, dbname=None, delimiter='\\t'):\n data_line = '##COLUMNNAME='+'\"'+ column_name +'\"'\n if column_name not in self.metadata:\n if data_type:\n if data_type not in ['Float', 'String', 'Integer']:\n raise SyntaxError(\"Type must be 'Float', 'String' or 'Integer'. 
You tried: %s\" % data_type)\n data_line += delimiter + 'TYPE=\"' + data_type + '\"'\n if version:\n data_line += delimiter + 'VERSION=\"' + version + '\"'\n if description:\n data_line += delimiter + 'DESCRIPTION=\"' + description + '\"'\n if dbname:\n data_line += delimiter + 'SCOUTHEADER=\"' + dbname + '\"'\n self.metadata.pop(column_name, 0)\n self.metadata[column_name] = data_line\n return", "def gen_metadata(args):\n with open(args.bibfile) as bibfile:\n bib_db = BibTexParser(common_strings=True).parse_file(bibfile)\n entries = sorted(list(bib_db.entries),\n key=lambda x: x['year'], reverse=True)\n list([update_file(entry) for entry in entries])\n annotations = [entry_to_annotation(entry, args.PI) for entry in entries]\n stream = open(args.metadata, 'w')\n yaml.dump(annotations, stream, width=192, default_flow_style=False)\n stream.close()", "def test_directory_meta(self):\n with TemporaryDirectory() as td:\n store = tb.HDFFile(td + '/test', 'w', type='directory')\n store['AAPL'] = df\n store.handle.meta('testtest', 123)\n store.table.meta('testtest', 456)\n store.close()\n\n # reload\n store = tb.HDFFile(td + '/test')\n assert store.handle.meta('testtest') == 123\n assert store.table.meta('testtest') == 456", "def items_from_meta_file(cls, rel_meta_path: pl.Path) -> tt.PathMetadataPairGen:\n pass", "def store(self, filename):", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)", "def und_add_files_to_db(udb_file, repository_dir):\n subprocess.call(f\"und add -db {udb_file} {repository_dir}\", shell=True)", "def WriteMetadata(self, metadata, overwrite=True):\n if not overwrite and 'meta' in metadata:\n raise errors.KeyczarError('\"meta\" attribute already exists')\n self.dict['meta'] = str(metadata)", "def set_stewi_meta(file_name, stewiformat=''):\n stewi_meta = FileMeta()\n stewi_meta.name_data = file_name\n stewi_meta.category = stewiformat\n stewi_meta.tool = \"StEWI\"\n stewi_meta.tool_version = STEWI_VERSION\n stewi_meta.ext = WRITE_FORMAT\n stewi_meta.git_hash = GIT_HASH\n stewi_meta.date_created = datetime.now().strftime('%d-%b-%Y')\n return stewi_meta", "def _add_atom_metadata(self, name, option):\n self.atom_metadata[name] = self._get_option(option)", "def _get_file_meta(cls, file=None, file_path=None):\n if not file and file_path:\n file = open(file_path, 'r')\n file_body = file.read()\n meta = {\n 'title': file.name, # read title from html\n 'subtitle': 'dupa', # read from html\n 'slug': os.path.splitext(os.path.basename(file.name))[0],\n 'abstract_html': 'Abstract', \n 'body_html': file_body,\n 'tags': [db.Category('one tag'), db.Category('second tag')],\n }\n if file_path:\n meta.update({\n 'date_created': datetime.datetime.fromtimestamp(os.path.getctime(file_path)),\n 'date_updated': datetime.datetime.fromtimestamp(os.path.getmtime(file_path)),\n })\n return meta", "def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # 
Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def git_add(self, filepath):\n filepath = os.path.normpath(filepath)\n self.repo.index.add([filepath])\n\n return filepath", "def add_file(self, filename):\r\n f = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = f.read()\r\n self.add_string(text)\r\n f.close()", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n s = f.read() \n self.add_string(s)", "def add_metadata(self, name, val: str = None):\n if isinstance(name, Metadata):\n self._metadata.append(name)\n if isinstance(name, str):\n self._metadata.append(Metadata(name, val))\n if isinstance(name, dict):\n for name, value in name.items():\n self._metadata.append(Metadata(name, value))", "def add_data(self, filename, content, redirect, session, bucket=None):\n\n self.logger.debug(\"Adding data\")\n\n filename = '/%s' % filename\n\n self._add_files('data', {filename: content}, session, bucket)\n\n self.logger.debug(\"Added data\")", "def test_directory_meta(self):\n with TemporaryDirectory() as td:\n store = tb.OBTFile(td + '/test', 'w', 'symbol', type='directory')\n store['AAPL'] = df\n store.handle.meta('testtest', 123)\n store.obt.meta('testtest', 456)\n store.close()\n\n # reload\n store = tb.OBTFile(td + '/test')\n assert store.handle.meta('testtest') == 123\n assert store.obt.meta('testtest') == 456", "def __mag_file_append(self, file):\n t = Table.read(file, format=\"ascii\")\n LightCurve.__mag_table_append(self, t)", "def add_metadata(ds, metadata):\n\n ds.attrs.update(metadata)\n\n return ds" ]
[ "0.68102056", "0.67017496", "0.6549066", "0.65284264", "0.64970475", "0.6281541", "0.62121874", "0.61612666", "0.61489415", "0.6141335", "0.60017574", "0.5996768", "0.5930677", "0.59127736", "0.5901605", "0.5892127", "0.58494866", "0.5829368", "0.5819864", "0.58169425", "0.57619125", "0.5730633", "0.57216865", "0.57141954", "0.57110304", "0.5706736", "0.5691373", "0.5682752", "0.5671062", "0.5640272", "0.56338435", "0.5602189", "0.5584625", "0.55755526", "0.55632883", "0.5548885", "0.55456316", "0.55456316", "0.55024505", "0.5492549", "0.5484515", "0.5480075", "0.54722923", "0.54695976", "0.54652894", "0.546119", "0.54596496", "0.54586375", "0.54512", "0.5443962", "0.5423754", "0.54124707", "0.54117095", "0.5409976", "0.54021007", "0.5392793", "0.53830135", "0.5379874", "0.53676397", "0.53617835", "0.5358598", "0.5353419", "0.53461087", "0.5342364", "0.53328127", "0.533271", "0.53294045", "0.53246295", "0.53246295", "0.53221154", "0.5320431", "0.5318283", "0.53155696", "0.53128994", "0.53065455", "0.5295717", "0.5273211", "0.5252985", "0.52454764", "0.5243236", "0.5242046", "0.52394575", "0.52271", "0.52132654", "0.5204065", "0.5203898", "0.5203505", "0.51902723", "0.51893955", "0.5188141", "0.51846904", "0.51782596", "0.51674414", "0.5165773", "0.5163828", "0.51593107", "0.5157625", "0.5155813", "0.5152743", "0.51454633" ]
0.6100383
10
This is the main function of the program
def main():
    global FPSCLOCK, DISPLAYSURF, BASICFONT
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    # set up the window
    BASICFONT = pygame.font.Font('freesansbold.ttf', 20)
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
    pygame.display.set_caption('Othello')
    DISPLAYSURF.fill(BGCOLOR)

    #Initialization of the variable
    ###############################
    testg = 0            #variable to contain result of testwinner
    board = init_board() #variable to contain the board
    turn = 1             # whose turn is it {1: black, 2: white}

    drawBoard()

    while True:
        clicked_box = () #init
        legals = possible(board,turn)
        for e in pygame.event.get():
            if e.type == QUIT: #handling quitting
                pygame.quit()
                sys.exit()
            elif e.type == MOUSEBUTTONUP: #handling click
                mousex,mousey = e.pos #record a click and its position
                clicked_box = isInBoard(mousex,mousey)
        if clicked_box != () and clicked_box in legals: #if it the clicked box is a legal move, make the move
            player_move = Move(clicked_box,turn, board)
            player_move.make(board)
            winner = test_winner(board)
            if winner: #if true : game is not done
                #tests the winner if the game is done
                if winner == 1:
                    print "Black player winns"
                elif winner == 2:
                    print "White player wins."
                elif winner == 3:
                    print "This is a tie game !"
                else:
                    turn = 2/(winner-3) # if res= 4 it is black's turn if it is 5 it is white's turn'
            turn = 2/turn
        pygame.display.update()
        FPSCLOCK.tick(FPS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n return", "def main():\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(self):", "def main():\n\tpass", "def main(self):\r\n pass", "def main(self) -> None:\n pass", "def main(args):", "def main(args):", "def main() -> None:\n return", "def main(args=None):", "def main(args=None):", "def main():\n\n pass", "def main():\n ...", "def main():\n Main()", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n return 0", "def main():\n return 0", "def main():\n run_program()", "def main():\n pass", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def run():\n main()", "def main(self):\n pass", "def main():\n print(\"is Running!\")", "def main():\r\n print(\"JoJo\")", "def main(args=None):\n pass", "def main(cls):\n raise NotImplementedError", "def main(self, params):\n pass", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Everythin is ok\")", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n\n BASIC.run(PROGRAM)", "def main():\n\tcli = Cli()\n\tcli.run()", "def main(ctx, verbose):\n return", "def main():\n\tshow_program_intro()\n\tbyte_lines = read_rain_gauge_sunnyside_school()\n\t#print_rain_guage_output(byte_lines)\n\ttotals_dict = parse_regex_daily_total(byte_lines)\n\ttotals_list = sort_rain_dictionary(totals_dict)\n\thighest_rainfall = get_day_highest_rainfall(totals_list)\n\tprint_highest_rainfall(highest_rainfall)\n\tyear_highest_rain = get_year_with_most_rain(totals_list)\n\tprint_year_most_rain(year_highest_rain)", "def main(argv: Sequence[Text]) -> None:\n\n\n print(\"TODO\")", "def main():\n pass\n\n if __name__ == \"__main)__\":\n main()", "def main():\n print(\"Everything is ok.\")", "def run_main():\n main(sys.argv)", "def main(self, **kwargs) -> None:\n ...", "def main(source):\n pass", "def main():\n boba_blast_game.main()", "def main():\n produce()", "def main():\n args = parse_args()\n process_args(args)", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def run():\n main(sys.argv[1:])" ]
[ "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.9220979", "0.88752645", "0.884131", "0.87605006", "0.87605006", "0.87605006", "0.87605006", "0.87069756", "0.86462677", "0.8444511", "0.8374563", "0.8374252", "0.8374252", "0.8282936", "0.8263522", "0.8263522", "0.8260855", "0.82572824", "0.82571423", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.8227792", "0.79938954", "0.79938954", "0.7950343", "0.79198045", "0.7819991", "0.7819991", "0.77776384", "0.77709705", "0.76985157", "0.7694003", "0.76794785", "0.76481557", "0.75993186", "0.7455455", "0.7427188", "0.7427188", "0.7427188", "0.73926187", "0.7346726", "0.7346726", "0.7346726", "0.7346726", "0.7299965", "0.7257477", "0.725736", "0.7237839", "0.72102064", "0.72097594", "0.72020626", "0.7186623", "0.71813405", "0.7151879", "0.714706", "0.71298975", "0.71168196", "0.7096967", "0.7055168" ]
0.0
-1
This is the method that draws the game board
def drawBoard():
    #draw 64 Rectangles from (MARGINH,MARGINV) with CASESIZE sizes
    for i in range(BOARDSIZE):
        for j in range(BOARDSIZE):
            pygame.draw.rect(DISPLAYSURF, BLACK, [MARGINH + (i)*CASESIZE, MARGINV + (j)*CASESIZE, CASESIZE, CASESIZE], 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True", "def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self, win):\n for y in range(len(self.board)):\n for x, color in enumerate(self.board[y]):\n pygame.draw.rect(win, color, (self.x+x*self.cell_size, self.y+y*self.cell_size,\n self.cell_size, self.cell_size), 0)\n\n pygame.draw.rect(win, (0, 0, 0), (self.x, self.y, self.width, self.height), BORDER_THICKNESS)", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()", "def draw_board(self):\n for i in range(0, 800, 80):\n if i == 80:\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, (0, 0, 128), (0, i), (720, i), width=5)\n continue\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, 'black', (0, i), (720, i), width=3)\n for j in range(240, 800, 240):\n pygame.draw.line(self.screen, (0, 0, 128), (j, 80), (j, 800), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, j + 80), (720, j + 80), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, 80), (0, 800), width=5)", "def draw_board(self):\n self.window.fill(Colors.WHITE.value)\n self.draw_lines()\n self.draw_obstacles()", "def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = (0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) 
* CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(7, 7)", "def draw_board(board_state):\n print(\" {} | {} | {} \".format(board_state[6], board_state[7], board_state[8]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[3], board_state[4], board_state[5]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[0], board_state[1], board_state[2]))", "def draw_board(win, board):\n win.fill(WHITE)\n \n #Draw board\n for row in board:\n for col in row:\n #Current grid position\n coords = col['coord']\n\n #coords\n h_fence_coords = (coords[0]*(SQUARESIZE+FENCEWIDTH), coords[1]*SQUARESIZE+FENCEWIDTH*(coords[1]-1))\n v_fence_coords = (coords[0]*SQUARESIZE+FENCEWIDTH*(coords[0]-1), coords[1]*(SQUARESIZE+FENCEWIDTH))\n board_spotcoords = (coords[0]*(SQUARESIZE+FENCEWIDTH), coords[1]*(SQUARESIZE+FENCEWIDTH))\n \n #Rect objects\n h_fence = pygame.Rect(h_fence_coords, ((2*SQUARESIZE+FENCEWIDTH),FENCEWIDTH))\n v_fence = pygame.Rect(v_fence_coords, (FENCEWIDTH,2*SQUARESIZE+FENCEWIDTH))\n board_spot = pygame.Rect(board_spotcoords, (SQUARESIZE,SQUARESIZE))\n \n #Draw horizontal fence \n if coords[1] != 0 and coords[1] != 9 and coords[0] != 8:\n if col['h'] == 1:\n pygame.draw.rect(win, RED, h_fence)\n elif col['h'] == 2:\n pygame.draw.rect(win, BLUE, h_fence)\n\n #Draw vertical fence\n if coords[0] != 0 and coords[0] != 9 and coords[1] != 9:\n if col['v'] == 1:\n pygame.draw.rect(win, RED, v_fence)\n elif col['v'] == 2:\n pygame.draw.rect(win, BLUE, v_fence)\n \n #Draw grid square\n if coords[1] != 9 and coords[0] != 9:\n if coords[1] == 0:\n pygame.draw.rect(win, LIGHTRED, board_spot)\n elif coords[1] == 8:\n pygame.draw.rect(win, LIGHTBLUE, board_spot)\n else:\n pygame.draw.rect(win, GRAY, board_spot)", "def draw_board(self):\n print(\"\\n\" * 10)\n print(\"-PRINTING BOARD-\")\n for row in self.grid:\n for column in row:\n print(column.character(), end=\"\")\n print() # to create a new line", "def draw_puzzle():\n # Define Baseboard\n baseboard = pygame.Rect(61, 70, 498, 498) # creates a rectangle object \n\n # Draw Baseboard\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, baseboard)\n\n tiles = GAME_PUZZLE.puzzle # fetch game puzzle\n\n gameboard = [] # mimics the puzzle_board.puzzle\n\n # define first tile position\n start_x = 62 \n start_y = 71\n\n # build a tile for each item in the game puzzle\n for i in range(0,len(tiles)):\n row = []\n for j in range(0, len(tiles[i])):\n\n if tiles[i][j] is not None: # only draw non - blank tile\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a rectangle object\n\n tile_txt = TILE_FONT.render(str(tiles[i][j]), True, TEXTCOLOR) # creates font \n\n row.append(new_tile) # add tile to row in 2d list\n\n pygame.draw.rect(RENDER_WINDOW, BUTTONCOLOR, new_tile) #draw title rectangle\n\n RENDER_WINDOW.blit(tile_txt, (new_tile.x + 40, new_tile.y + 20)) # render text centered on Tile\n else:\n new_tile = pygame.Rect(start_x, start_y, 164, 164) # creates a WHITE rectangle object\n row.append(new_tile)\n pygame.draw.rect(RENDER_WINDOW, TEXTCOLOR, new_tile) #draw title rectangle\n \n \n start_x += 166\n\n gameboard.append(row)\n start_x = 62 # reset for each row\n start_y += 166\n \n # update the global Board\n global BOARD\n BOARD = gameboard", "def __draw_board(self):\n\n COLOR = (0, 0, 0, 200)\n LINE_WIDTH = 2\n 
STAR_POINT_SIZE = 4\n FONT_SIZE = 18\n\n (boardSize, drawExtraStarPoints, starPointOffset) = self.settings\n boardSize -= 1\n stepX = self.innerWidth / boardSize\n stepY = self.innerHeight / boardSize\n labelBoardSpacing = self.borderSize / 2\n draw = ImageDraw.Draw(self.baseImage)\n font = ImageFont.truetype(\"assets/font_fifteentwenty.otf\", FONT_SIZE)\n\n # Draw lines and labels\n for i in range(0, boardSize + 1):\n x = self.borderSize + stepX * i\n label = chr(ord('A') + i)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(x, self.borderSize), (x, self.innerHeight + self.borderSize)], COLOR, LINE_WIDTH)\n draw.text((x - labelWidth / 2, self.borderSize - labelHeight - labelBoardSpacing + labelHeight / 2), label, COLOR, font)\n draw.text((x - labelWidth / 2, self.borderSize + self.innerHeight + labelBoardSpacing - labelHeight / 2), label, COLOR, font)\n\n for i in range(0, boardSize + 1):\n y = self.borderSize + stepY * i\n label = str(boardSize - i + 1)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(self.borderSize, y), (self.innerWidth + self.borderSize, y)], COLOR, LINE_WIDTH)\n draw.text((self.borderSize - labelWidth - labelBoardSpacing + labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n draw.text((self.borderSize + self.innerWidth + labelBoardSpacing - labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n\n # Calculate star point positions\n centerX = boardSize / 2 * stepX + self.borderSize\n centerY = boardSize / 2 * stepY + self.borderSize\n leftX = starPointOffset * stepX + self.borderSize\n rightX = (boardSize - starPointOffset) * stepX + self.borderSize\n topY = starPointOffset * stepY + self.borderSize\n bottomY = (boardSize - starPointOffset) * stepY + self.borderSize\n\n # Draw star points\n draw.ellipse([(centerX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n\n if drawExtraStarPoints:\n draw.ellipse([(centerX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(centerX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)", "def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()", "def draw(self):\n i = 0\n self.window.fill((60,50,20))\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n if self.board[i][j] != 0:\n if self.board[i][j].player == 0:\n color = (200, 0, 0)\n else:\n color = (0, 0, 200)\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, color, (30+j*100, 40+i*100, 80, 60))\n elif 
self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, color, (40+j*100, 30+i*100, 60, 80))\n if self.board[i][j].master:\n if self.board[i][j].direction == 0:\n pygame.draw.ellipse(self.window, (255,255,0), (40+j*100, 50+i*100, 60, 40))\n pygame.draw.ellipse(self.window, color, (45+j*100, 55+i*100, 50, 30))\n elif self.board[i][j].direction == 1:\n pygame.draw.ellipse(self.window, (255,255,0), (50+j*100, 40+i*100, 40, 60))\n pygame.draw.ellipse(self.window, color, (55+j*100, 45+i*100, 30, 50))\n \n if self.selected != None:\n pygame.draw.rect(self.window, (200, 200, 0), (20+self.selected[1]*100, 20+self.selected[0]*100, 100, 100), 5)\n pygame.display.flip()", "def draw_board(self) -> None:\n # -> establishment of new dimensions for the canvas :\n side_size = self.side_size\n wide, high = side_size * self.n_col, side_size * self.n_row\n self.can.configure(width=wide, height=high)\n # Layout of the grid:\n self.can.delete(tkinter.ALL) # erasing of the past Layouts\n s = side_size\n for _ in range(self.n_row - 1): # horizontal lines\n self.can.create_line(0, s, wide, s, fill=\"white\")\n s += side_size\n s = side_size\n for _ in range(self.n_col - 1): # vertical lines\n self.can.create_line(s, 0, s, high, fill=\"white\")\n s += side_size\n # Layout of all the pawns,\n # white or black according to the state of the game :\n for row in range(self.n_row):\n for col in range(self.n_col):\n x1 = col * side_size + 3 # size of pawns =\n x2 = (col + 1) * side_size - 3 # size of the case - 10\n y1 = row * side_size + 3 #\n y2 = (row + 1) * side_size - 3\n color = self.color(row, col)\n self.can.create_oval(x1, y1, x2, y2, outline=\"grey\",\n width=1, fill=color)", "def draw_board(self):\n board = \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[1], self.board_values[2], self.board_values[3])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[4], self.board_values[5], self.board_values[6])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[7], self.board_values[8], self.board_values[9])\n board += \"-------------------\\n\"\n return board", "def displayBoard(board):\n\n # Background color.\n WINDOW.fill(BORDER)\n\n # Draws out each row.\n for i in range(0, 4):\n displayRow(board[i], 8, i)\n\n pygame.display.update()", "def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))", "def draw_board(self, board: 
BoardModel):\n self._board = board\n self.delete(tk.ALL)\n\n for i in range(self._grid_size):\n for j in range(self._grid_size):\n char = self._board.get_game()[self.position_to_index((j, i), self._grid_size)]\n x1 = i * 60\n y1 = j * 60\n x2 = x1 + 60\n y2 = y1 + 60\n\n if char == UNEXPOSED:\n self.create_rectangle(x1, y1, x2, y2, fill=\"green\")\n elif char == POKEMON:\n self.create_rectangle(x1, y1, x2, y2, fill=\"yellow\")\n elif char.isdigit():\n self.create_rectangle(x1, y1, x2, y2, fill=\"#90EE90\")\n self.create_text(x1 + 60 / 2, y1 + 60 / 2, text=int(char))\n elif char == FLAG:\n self.create_rectangle(x1, y1, x2, y2, fill=\"red\")\n\n self.bind_clicks()", "def draw_board(screen):\n colors = [p.Color(\"white\"), p.Color(\"dark gray\")]\n\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n # For all light squares: row + col => even\n # dark squares: row + col => odd\n color = colors[(row + col) % 2]\n p.draw.rect(screen, color, p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))", "def draw():\n background(255)\n for i in range(COLS):\n for j in range(ROWS):\n if (BOARD[i][j] == 1): fill(0)\n else: fill(255)\n noStroke() # stroke(0)\n rect(i * CELL_SIZE, j * CELL_SIZE, CELL_SIZE, CELL_SIZE)\n if (PLAY):\n generate()", "def draw_game(self) -> None:\n\n self.screen.fill(THECOLORS['royalblue4'])\n self.our_board.draw(self.screen)\n self.game_status.draw(self.screen)\n self.heading_bar.draw(self.screen)\n\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.draw(self.screen)\n elif not self.our_game_state == STATE_READY_TO_START:\n self.their_board.draw(self.screen)", "def display_board(self):\n \n oled.fill(0)\n for r in range(_HEIGHT):\n for c in range(_WIDTH):\n sprite = _SPRITES[self.board[r * _WIDTH + c]]\n oled.blit(sprite, c * 9, (_HEIGHT - r - 1) * 8 + 16)\n \n r, c = self.last_play_rc\n if r != None:\n oled.hline(c * 9, (_HEIGHT - r - 1) * 8 + 16 + 6, 5, 1) \n self.draw_header()\n oled.show()", "def draw_board(surface, board, player_number):\n grid_img = pygame.image.load('images/grid.png')\n surface.blit(grid_img, (0, 0))\n for row in range(len(board.get_grid())):\n for col in range(len(board.get_grid()[0])):\n x = col * TILE_SIZE\n y = row * TILE_SIZE\n tile = board.get_coord(row, col)\n if tile is not None:\n draw_tile(surface, tile, x, y, castle_color=('images/' + CASTLES[player_number]))", "def draw_board(self):\n # Clears screen of all turtle drawings\n self.pen.clear()\n\n # Draw border and fill everything.\n self._draw_square(self.board_lft_x - self.border_size,\n self.board_top_y + self.border_size,\n self.board_side + 2 * self.border_size,\n self.border_color, True)\n\n # Draw white squares of board.\n self._draw_square(self.board_lft_x, self.board_top_y,\n self.board_side, self.square_light, True)\n\n # Draw dark squares of board.\n # Automatically add a square side to x.\n # Subtract that square side when row is odd.\n for row in range(8):\n x = self.board_lft_x + self.next_square - row % 2 * self.next_square\n y = self.board_top_y - row * self.next_square\n for col in range(4):\n self._draw_square(x, y, self.square_side_size, self.square_dark,\n True)\n x += 2 * self.next_square\n\n # Draw Notation 1-8 on border.\n for row in range(8):\n self._put_chr_at(chr(ord(str(8 - row))), row, -1, (0, 0, 0))\n\n # Draw Notation a-h on border.\n for col in range(8):\n self._put_chr_at(chr(ord('a') + col), 8, col, (0, 0, 0))\n\n # Draw White Turn.\n self._put_chr_at(\"Turn: White\", 9, 1, (0, 0, 0))", "def draw_board(board_list):\n\n # clears 
out the canvas to make an empty board\n canvas.delete(\"all\")\n\n # Horizontal Lines\n canvas.create_rectangle(600, 170, 0, 160, fill=\"black\")\n canvas.create_rectangle(600, 330, 0, 320, fill=\"black\")\n\n # Vertical Lines\n canvas.create_rectangle(210, 480, 200, 0, fill=\"black\")\n canvas.create_rectangle(410, 480, 400, 0, fill=\"black\")\n\n # iterate through the board and draw each of the positions\n for row in range(len(board_list)):\n for column in range(len(board_list[0])):\n\n if board_list[row][column] == \"x\":\n draw_x(row + 1, column + 1)\n elif board_list[row][column] == \"o\":\n draw_o(row + 1, column + 1)\n\n winner = check_win(board_list)\n\n # count how many empty spaces are left on the board\n empty_count = 0\n for i in range(len(board_list)):\n empty_count += board_list[i].count(0)\n\n # no spaces left and there is still no winner\n if (winner is None) and (empty_count == 0):\n winner = \"Draw\"\n winner_popup(winner)\n\n # There is a winner\n elif winner is not None:\n winner_popup(winner)", "def game_draw(self):\n pass", "def draw_game():\n # Fill window with background color\n RENDER_WINDOW.fill(BACKGROUNDCOLOR)\n\n # Draw Game Title\n draw_title()\n\n # Draw Puzzle\n draw_puzzle()\n \n # Draw buttons to GUI \n draw_buttons()\n\n # Draw Text\n draw_text() \n\n # Draw random toggle\n draw_rand_toggle()", "def draw():", "def draw_board(self):\n self.current_board = self.gameboard.copy()\n \n # Draw our rewards\n for r, row in enumerate(self.world_rewards):\n for c, reward in enumerate(row):\n if reward is not None:\n asset_key = reward.asset\n x = 64*(c+1)\n y = 64*(r+1)\n self.current_board.paste(\\\n self.assets[asset_key], (x,y), self.assets[asset_key])\n \n # Draw our creature\n cr_x, cr_y = self.creature.current_location\n x = 64*(cr_x + 1) # Should be the center of the tile\n y = 64*(cr_y + 1)\n creature_image = self.assets['beaver']\n if self.creature.facing == 'S':\n creature_image = creature_image.rotate(-180)\n elif self.creature.facing == 'E':\n creature_image = creature_image.rotate(-90)\n elif self.creature.facing == 'W':\n creature_image = creature_image.rotate(-270)\n self.current_board.paste(creature_image, (x,y), creature_image)", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n 
self.drawLine()", "def __draw_board(self, state=None):\n if not state:\n state = self.state\n if self.game_started:\n print('\\r\\033[8A')\n print(emoji.emojize(' {state[0]} | {state[1]} | {state[2]} \\n___|___|___\\n'\n ' {state[3]} | {state[4]} | {state[5]} \\n___|___|___\\n'\n ' {state[6]} | {state[7]} | {state[8]} \\n | | \\n'.format(state=state)))", "def draw(self, screen):", "def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')", "def draw(self):\n print(\"Drawing...\", end=' ')\n s = self.pixelsPerCell\n for h in range(self.height):\n for w in range(self.width):\n self.box[w][h] = self.canvas.create_rectangle(w*s, h*s, w*s+s, h*s+s,\n fill = \"gray\", outline = \"gray\")\n self.canvas.update()\n print(\"Done!\")", "def draw(self, board):\r\n terrain_centers = self._draw_terrain(board)\r\n self._draw_numbers(board, terrain_centers)\r\n self._draw_pieces(board, terrain_centers)\r\n if self.game.state.can_place_road():\r\n self._draw_piece_shadows(PieceType.road, board, terrain_centers)\r\n if self.game.state.can_place_settlement():\r\n self._draw_piece_shadows(PieceType.settlement, board, terrain_centers)\r\n if self.game.state.can_place_city():\r\n self._draw_piece_shadows(PieceType.city, board, terrain_centers)\r\n if self.game.state.can_move_robber():\r\n self._draw_piece_shadows(PieceType.robber, board, terrain_centers)\r\n\r\n if self.game.state.is_in_game():\r\n self._draw_ports(board, terrain_centers)\r\n else:\r\n self._draw_port_shadows(board, terrain_centers)", "def draw_grid(self):\n for square in range(COLS+1):\n #vertical lines\n start_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(0))\n end_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(ROWS))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)\n for square in range(ROWS+1):\n #horizontal lines\n start_pos = (helpers.get_col_left_p(0),helpers.get_row_top_p(square))\n end_pos = (helpers.get_col_left_p(COLS),helpers.get_row_top_p(square))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)", "def render_panel(self):\n size = self.env.GRID_NUM\n panel = self.PANEL_HEIGHT\n margin = self.MARGIN\n screen = self.screen\n cellWidth = self.CELL_WIDTH\n\n radius = (panel - 2 * margin) // 2\n x_left = margin\n x_right = (cellWidth + margin) * (self.env.GRID_NUM // 2) + margin\n y = margin\n w = (cellWidth + margin) * (self.env.GRID_NUM // 2) - margin\n h = panel - margin\n screenWidth = (cellWidth + margin) * size + margin\n\n DrawRect(screen, Colors.GRAY, [x_left, y, w, h])\n DrawRect(screen, Colors.GRAY, [x_right, y, w, h])\n DrawCircle(screen, Colors.BLACK, (x_left + margin + radius, margin + h // 2), radius)\n DrawCircle(screen, Colors.WHITE, (x_right + margin + radius, margin + h // 2), radius)\n if self.env.isBlackTurn():\n DrawPolygon(screen, Colors.RED, ((margin, margin), (margin + panel // 3, margin), (margin, margin + panel // 3)))\n else:\n DrawPolygon(screen, Colors.RED, ((x_right, margin), (x_right + panel // 3, margin), (x_right, margin + panel // 3)))\n\n black_disks = self.env.num_black_disks()\n white_disks = self.env.num_white_disks()\n black_score = self.env.black_score\n white_score = self.env.white_score\n black_ai = self.agents[Player.BLACK].name\n white_ai = self.agents[Player.WHITE].name\n\n pygame.font.init()\n self.font = pygame.font.SysFont('Comic Sans MS', 16)\n\n text = self.font.render(f'{black_ai} 
({black_score}) {black_disks}', False, (255, 255, 255))\n text_rect = text.get_rect(midleft=(x_left + 2 * (margin + radius), panel // 2))\n self.screen.blit(text, text_rect)\n\n text = self.font.render(f'{white_ai} ({white_score}) {white_disks}', False, (255, 255, 255))\n text_rect = text.get_rect(midleft=(x_right + 2 * (margin + radius), panel // 2))\n self.screen.blit(text, (x_right + 2 * (margin + radius), panel // 2 - 10))", "def draw(self):\n for x in range(self.numRows):\n print self.grid[x]", "def run(self):\r\n \r\n if not self.gameOver:\r\n screen.fill(COLOR3)\r\n self.board.drawBoard()\r\n self.handleEvents()\r\n for piece in self.board.pieces.values():\r\n piece.update()\r\n else:\r\n self.resetGame()\r\n pygame.display.update()", "def draw_game_board(pencil: 'Turtle') -> None:\n pencil.pendown()\n pencil.left(90)\n pencil.forward(150)\n pencil.backward(50)\n pencil.left(90)\n pencil.forward(50)\n pencil.backward(150)\n pencil.forward(50)\n pencil.right(90)\n pencil.forward(50)\n pencil.backward(100)\n pencil.right(90)\n pencil.forward(50)\n pencil.backward(50)\n pencil.right(90)\n pencil.forward(50)\n pencil.backward(50)\n pencil.right(90)\n pencil.forward(100)\n pencil.right(90)\n pencil.penup()\n pencil.goto(0, 0)", "def handleDraw(self):\n print(\"Its a draaaw\")\n self.board.drawDrawBoard()\n\n self.ui.game = None\n if self.istournament:\n if self.tournamentgame.gamecount < 2:\n self.tournamentgame.gamecount = self.tournamentgame.gamecount+1\n self.board.displayTournamentDrawInfo(3-self.tournamentgame.gamecount)\n time.sleep(3)\n self.board = None\n self.ui.displayTournamentGame(True)\n else:\n winner = math.floor(random.random()*2+1)\n self.board.displayRandomizingWinner()\n time.sleep(2)\n if self.tournamentgame.winner == 1:\n self.board.drawWinBoard(1)\n if self.tournamentgame.winner == 2:\n self.board.drawWinBoard(-1)\n self.board = None\n self.ui.tournament.setWinner(winner)\n self.ui.displayCurrentTournament()", "def draw_game_state(screen, gs):\n draw_board(screen)\n draw_pieces(screen, gs.board)", "def renderall(self):\n\n if not self.isinitialized:\n return\n # clear display\n self.screen.fill(BGCOLOR)\n # draw the board\n self.drawBoard()\n # flip the display to show whatever we drew\n pygame.display.flip()", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def draw_board(screen, game):\n for row in range(3):\n for col in range(3):\n mark = game.board[row][col]\n if mark == 'X':\n mark_image = pygame.image.load(\"x_mark.png\")\n screen.blit(mark_image, get_xy_position(row, col))\n elif mark == 'O':\n mark_image = pygame.image.load(\"o_mark.png\")\n screen.blit(mark_image, get_xy_position(row, col))", "def draw(self, state):\n if state is None:\n 
state = self.model.current_state\n for row in range(len(self.model.maze.walls)):\n self.__draw_row_division()\n print(\" {0:2d} \".format(row), end='') # Imprime número da linha\n\n for col in range(len(self.model.maze.walls[0])):\n if self.model.maze.walls[row][col] == 1:\n print(\"|XXX\", end='') # Desenha parede\n elif self.model.goal_state.get_element(Coordinate(row, col)):\n if state.player.row == row and state.player.col == col:\n print(\"|G-P\", end='') # Desenha objetivo e jogador.\n elif state.get_element(Coordinate(row, col)):\n print(\"|G-B\", end='') # Desenha objetivo e caixa.\n else:\n print(\"| G\", end='') # Desenha objetivo\n elif state.player.row == row and state.player.col == col:\n print(\"| P\", end='') # Desenha jogador\n elif state.get_element(Coordinate(row, col)):\n print(\"| B\", end='') # Desenha caixa.\n else:\n print(\"| \", end='') # Desenha vazio\n print(\"|\")\n if row == (len(self.model.maze.walls) - 1):\n self.__draw_row_division()", "def draw_pieces(screen, board):\n for row in range(DIMENSION):\n for col in range(DIMENSION):\n piece = board[row][col]\n # Check for empty square\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(col * SQ_SIZE, row * SQ_SIZE, SQ_SIZE, SQ_SIZE))", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def draw(self, win):\n # draw grid\n gap = self.width // 9\n for i in range(self.rows + 1):\n if i % 3 == 0 and i != 0:\n thick = 4\n else:\n thick = 1\n pygame.draw.line(win, (0, 0, 0), (0, i * gap), (self.width, i * gap), thick)\n pygame.draw.line(win, (0, 0, 0), (i * gap, 0), (i * gap, self.width), thick)\n\n # draw cubes\n for i in range(self.rows):\n for j in range(self.cols):\n self.cubes[i][j].draw(win)", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, 
(255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def draw_grid(self):\n\n # Draw horizontal lines\n for row in range(self.num_rows + 1):\n left = row_column_to_pixels(row, 0)\n right = row_column_to_pixels(row, self.num_cols)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], left, right)\n\n # Draw vertical lines\n for col in range(self.num_cols + 1):\n top = row_column_to_pixels(0, col)\n bottom = row_column_to_pixels(self.num_rows, col)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], top, bottom)", "def draw_moves(game, board, piece):\n all_valid_actions = board.get_valid_actions(piece)\n board.draw(game.window)\n pygame.draw.circle(game.window, (0,255,0), (piece.x, piece.y), 50, 5)\n game.draw_valid_actions(all_valid_actions)\n pygame.display.update()\n #pygame.time.delay(30)", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def draw():\r\n\r\n print('\\n+---+---+---+')\r\n for i in range(9):\r\n print('| ' + board[i] + ' ', end='')\r\n if (i + 1) % 3 == 0:\r\n print('|\\n+---+---+---+')", "def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )", "def draw(self):\n #create the label of the game\n label = tk.Label(self._master,\n bg=\"lightcoral\",\n fg=\"white\",\n text=\"Pokemon: Got 2 Find Them All!\",\n font=('Times New Roman', 20, 'bold'),\n heigh=2)\n label.pack(fill=tk.X)\n\n #the transformation between task_one and task_two\n if self._task==TASK_ONE:\n self._board = BoardView(self._master, self._grid_size, self._grid_size * 60)\n\n else:\n self._board = ImageBoardView(self._master, self._grid_size, self._grid_size * 60)\n\n self._board.pack()\n self._board.draw_board(self._model)\n\n self._bar = StatusBar(self._master, self._num_pokemon)\n self._bar.pack()", "def printBoard(self):", "def print_board(board):\n win = GraphWin('N-Rainhas', 850, 650)\n win.setBackground(color_rgb(188, 237, 145))\n title = Text(Point(400, 30), \"N-Rainhas\")\n title.setSize(20)\n title.draw(win)\n\n # Desenha tabuleiro principal\n rect = Rectangle(\n Point(150 - 5, 100 - 5),\n Point(650 + 5, 600 + 5)\n )\n rect.setFill('brown')\n rect.draw(win)\n\n # Desenha as casas no tabuleiro\n square = 500 / N\n for i in range(N):\n for j in range(N):\n if (i + j) % 2 == 0:\n x = 150 + i * square\n y = 100 + j * square\n rect = Rectangle(\n Point(x, y),\n Point(x + square, y + square)\n )\n rect.setFill('gray')\n rect.draw(win)\n\n # Desenha as peças no tabuleiro\n x = 150 + i * square\n y = 100 + board[i] * square\n cir = Circle(\n Point(x + 0.5 * square, y + 0.5 * square), 160 / N\n )\n cir.setFill('blue')\n cir.draw(win)\n\n win.getMouse()\n win.close()", "def update(self, board):\n for row in range(8):\n for col in range(8):\n if board[row, col] == -1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[0])\n elif board[row, col] == -2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[2])\n elif board[row, 
col] == 0:\n self.circles[row][col].undraw()\n self.pieces[row][col].setFill(self.frame_colors[(row+col)%2])\n elif board[row, col] == 1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[1])\n elif board[row, col] == 2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[3])", "def draw(self):\n self.screen.fill(Color.BLACK)\n self.screen.blit(self.red_block, self.apple)\n [self.screen.blit(self.green_block, xy) for xy in self.snake]\n self.screen.blit(self.white_bar, (0, 0))\n self.draw_text(str(self.score), self.score_pos, size=32)\n pygame.display.flip()", "def draw_grid(self):\n pygame.draw.rect(self.screen, BLACK,\n (*grid_pos, WIDTH - 150, HEIGHT-150), 2)\n for x in range(9):\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0] + (x * cell_size), grid_pos[1]),\n (grid_pos[0] + (x * cell_size), grid_pos[1] + 450),\n 2 if x % 3 == 0 else 1\n )\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0], grid_pos[1] + (x * cell_size)),\n (grid_pos[0] + 450, grid_pos[1] + (x * cell_size)),\n 2 if x % 3 == 0 else 1\n )", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')", "def display_board(board):\n print(\" | |\")\n print(\" \" + board[7] + \" | \" + board[8] + \" | \" + board[9])\n print(\" | |\")\n display_hline()\n print(\" | |\")\n print(\" \" + board[4] + \" | \" + board[5] + \" | \" + board[6])\n print(\" | |\")\n display_hline()\n print(\" | |\")\n print(\" \" + board[1] + \" | \" + board[2] + \" | \" + board[3])\n print(\" | |\")", "def draw(self):\n if not self._move:\n px = self.get_pos_in_pixels()\n self.tile.draw(px.x, px.y, 32, 32)\n else:\n self._move.draw()", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = 
list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)", "def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()", "def draw(self):", "def draw(self, state):\n self.__draw_tiles(state)\n self.__draw_player(state)\n\n if state.game_over:\n self.__draw_message('Game Over! Press any key to exit. Final score: {}'.format(state.player.length))\n else:\n self.__draw_message('Score: {}'.format(state.player.length))\n\n self.arena_win.box()\n self.arena_win.refresh()\n self.message_win.refresh()", "def _render_board(self):\n for index, row in enumerate(self._board):\n print(index, end=' ') if index < 10 else print(index, end=' ')\n list(map(lambda x: print(x, end=' '), row))\n print()\n print(' ', end='')\n for i in range(len(self._board)):\n print(i, end=' ') if i < 10 else print(i, end=' ')\n print()", "def draw_frame(board,board_y,board_x):\n\t# drawing boards frames\n\tfor frame_x in range(0,board_x):\n\t\tboard.addch(0,frame_x,'-')\n\t\tboard.addch(pad_y,frame_x,'-')\n\n\tfor frame_y in range(0,board_y):\n\t\tboard.addch(frame_y,0,'|')\n\t\tboard.addch(frame_y,pad_x,'|')\n\n\t# making the edges prettier\n\tfor edge in [[0,0],[board_y,0],[0,pad_x],[pad_y,pad_x]]:\n\t\tboard.addch(edge[0],edge[1],'+')", "def visualisation(board):\n\n\n if board.width == 6:\n\n print(board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1])\n print(\" \")\n print(board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1], \" \", board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1])\n print(\" \")\n print(board.coordinates[18][1], \" \", board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], \" \", board.coordinates[22][1], \" \", board.coordinates[23][1])\n print(\" \")\n print(board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1])\n print(\" \")\n print(board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1], \" \", board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1])\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1])\n print(\" \")\n\n if board.width == 9:\n\n print(board.coordinates[72][1], \" \", board.coordinates[73][1], \" \", board.coordinates[74][1], \" \", board.coordinates[75][1], \" \", board.coordinates[76][1], \" \", board.coordinates[77][1], \" \", board.coordinates[78][1], \" \", 
board.coordinates[79][1], \" \", board.coordinates[80][1])\n print(\" \")\n print(board.coordinates[63][1], \" \", board.coordinates[64][1], \" \", board.coordinates[65][1], \" \", board.coordinates[66][1], \" \", board.coordinates[67][1], \" \", board.coordinates[68][1], \" \", board.coordinates[69][1], \" \", board.coordinates[70][1], \" \", board.coordinates[71][1])\n print(\" \")\n print(board.coordinates[54][1], \" \", board.coordinates[55][1], \" \", board.coordinates[56][1], \" \", board.coordinates[57][1], \" \", board.coordinates[58][1], \" \", board.coordinates[59][1], \" \", board.coordinates[60][1], \" \", board.coordinates[61][1], \" \", board.coordinates[62][1])\n print(\" \")\n print(board.coordinates[45][1], \" \", board.coordinates[46][1], \" \", board.coordinates[47][1], \" \", board.coordinates[48][1], \" \", board.coordinates[49][1], \" \", board.coordinates[50][1], \" \", board.coordinates[51][1], \" \", board.coordinates[52][1], \" \", board.coordinates[53][1])\n print(\" \")\n print(board.coordinates[36][1], \" \", board.coordinates[37][1], \" \", board.coordinates[38][1], \" \", board.coordinates[39][1], \" \", board.coordinates[40][1], \" \", board.coordinates[41][1], \" \", board.coordinates[42][1], \" \", board.coordinates[43][1], \" \", board.coordinates[44][1])\n print(\" \")\n print(board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1], \" \", board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1])\n print(\" \")\n print(board.coordinates[18][1], \" \", board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], \" \", board.coordinates[22][1], \" \", board.coordinates[23][1], \" \", board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1])\n print(\" \")\n print(board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1], \" \", board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1])\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1], \" \", board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1])\n print(\" \")\n\n\n if board.width == 12:\n print(board.coordinates[132][1], \" \", board.coordinates[133][1], \" \", board.coordinates[134][1], \" \", board.coordinates[135][1], \" \", board.coordinates[136][1], \" \", board.coordinates[137][1], \" \", board.coordinates[138][1], \" \", board.coordinates[139][1], \" \", board.coordinates[140][1], \" \", board.coordinates[141][1], \" \", board.coordinates[142][1], \" \", board.coordinates[143][1], \" \", )\n print(\" \")\n print(board.coordinates[120][1], \" \", board.coordinates[121][1], \" \", board.coordinates[122][1], \" \", board.coordinates[123][1], \" \", board.coordinates[124][1], \" \", board.coordinates[125][1], \" \", board.coordinates[126][1], \" \", board.coordinates[127][1], \" \", board.coordinates[128][1], \" \", board.coordinates[129][1], \" \", board.coordinates[130][1], \" \", board.coordinates[131][1], \" \", )\n print(\" \")\n print(board.coordinates[108][1], \" \", board.coordinates[109][1], \" \", 
board.coordinates[110][1], \" \", board.coordinates[111][1], \" \", board.coordinates[112][1], \" \", board.coordinates[113][1], \" \", board.coordinates[114][1], \" \", board.coordinates[115][1], \" \", board.coordinates[116][1], \" \", board.coordinates[117][1], \" \", board.coordinates[118][1], \" \", board.coordinates[119][1], \" \", )\n print(\" \")\n print(board.coordinates[96][1], \" \", board.coordinates[97][1], \" \", board.coordinates[98][1], \" \", board.coordinates[99][1], \" \", board.coordinates[100][1], \" \", board.coordinates[101][1], \" \", board.coordinates[102][1], \" \", board.coordinates[103][1], \" \", board.coordinates[104][1], \" \", board.coordinates[105][1], \" \", board.coordinates[106][1], \" \", board.coordinates[107][1], \" \", )\n print(\" \")\n print(board.coordinates[84][1], \" \", board.coordinates[85][1], \" \", board.coordinates[86][1], \" \", board.coordinates[87][1], \" \", board.coordinates[88][1], \" \", board.coordinates[89][1], \" \", board.coordinates[90][1], \" \", board.coordinates[91][1], \" \", board.coordinates[92][1], \" \", board.coordinates[93][1], \" \", board.coordinates[94][1], \" \", board.coordinates[95][1], \" \", )\n print(\" \")\n print(board.coordinates[72][1], \" \", board.coordinates[73][1], \" \", board.coordinates[74][1], \" \", board.coordinates[75][1], \" \", board.coordinates[76][1], \" \", board.coordinates[77][1], \" \", board.coordinates[78][1], \" \", board.coordinates[79][1], \" \", board.coordinates[80][1], \" \", board.coordinates[81][1], \" \", board.coordinates[82][1], \" \", board.coordinates[83][1], \" \", )\n print(\" \")\n print(board.coordinates[60][1], \" \", board.coordinates[61][1], \" \", board.coordinates[62][1], \" \", board.coordinates[63][1], \" \", board.coordinates[64][1], \" \", board.coordinates[65][1], \" \", board.coordinates[66][1], \" \", board.coordinates[67][1], \" \", board.coordinates[68][1], \" \", board.coordinates[69][1], \" \", board.coordinates[70][1], \" \", board.coordinates[71][1], \" \", )\n print(\" \")\n print(board.coordinates[48][1], \" \", board.coordinates[49][1], \" \", board.coordinates[50][1], \" \", board.coordinates[51][1], \" \", board.coordinates[52][1], \" \", board.coordinates[53][1], \" \", board.coordinates[54][1], \" \", board.coordinates[55][1], \" \", board.coordinates[56][1], \" \", board.coordinates[57][1], \" \", board.coordinates[58][1], \" \", board.coordinates[59][1], \" \", )\n print(\" \")\n print(board.coordinates[36][1], \" \", board.coordinates[37][1], \" \", board.coordinates[38][1], \" \", board.coordinates[39][1], \" \", board.coordinates[40][1], \" \", board.coordinates[41][1], \" \", board.coordinates[42][1], \" \", board.coordinates[43][1], \" \", board.coordinates[44][1], \" \", board.coordinates[45][1], \" \", board.coordinates[46][1], \" \", board.coordinates[47][1], \" \", )\n print(\" \")\n print(board.coordinates[24][1], \" \", board.coordinates[25][1], \" \", board.coordinates[26][1], \" \", board.coordinates[27][1], \" \", board.coordinates[28][1], \" \", board.coordinates[29][1], \" \", board.coordinates[30][1], \" \", board.coordinates[31][1], \" \", board.coordinates[32][1], \" \", board.coordinates[33][1], \" \", board.coordinates[34][1], \" \", board.coordinates[35][1], \" \", )\n print(\" \")\n print(board.coordinates[12][1], \" \", board.coordinates[13][1], \" \", board.coordinates[14][1], \" \", board.coordinates[15][1], \" \", board.coordinates[16][1], \" \", board.coordinates[17][1], \" \", board.coordinates[18][1], \" \", 
board.coordinates[19][1], \" \", board.coordinates[20][1], \" \", board.coordinates[21][1], \" \", board.coordinates[22][1], \" \", board.coordinates[23][1], \" \", )\n print(\" \")\n print(board.coordinates[0][1], \" \", board.coordinates[1][1], \" \", board.coordinates[2][1], \" \", board.coordinates[3][1], \" \", board.coordinates[4][1], \" \", board.coordinates[5][1], \" \", board.coordinates[6][1], \" \", board.coordinates[7][1], \" \", board.coordinates[8][1], \" \", board.coordinates[9][1], \" \", board.coordinates[10][1], \" \", board.coordinates[11][1], \" \", )\n print(\" \")", "def updateBoard():\n #Drawing the initial board positions;\n for y in range(1, n+1): #1,2,3\n for x in range(1, n+1):\n val = positions[y][x];\n colorNode((x,y), numColors[val])\n label = Text(Point((x-0.5)*grid_side, (y-0.5)*grid_side),val);\n label.setSize(30)\n label.draw(win)", "def draw_grid(self):\n self.screen.draw_many_tiles(tile for tile in self.iter_grid_tiles())\n pass", "def __draw_game(self) -> None:\n self.__draw_window()\n self.pipes.draw(self.win)\n self.player.draw(self.win)\n pygame.display.update()", "def display_board(self, board):\n\n print(\"\\n\\t - A - B - C - D - E - F - G - H - \\n\")\n print(\"\\t8 \", board[56], \"|\", board[57], \"|\", board[58], \"|\", board[59], \"|\", board[60], \"|\", board[61], \"|\", board[62], \"|\", board[63])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t7 \", board[48], \"|\", board[49], \"|\", board[50], \"|\", board[51], \"|\", board[52], \"|\", board[53], \"|\", board[54], \"|\", board[55])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t6 \", board[40], \"|\", board[41], \"|\", board[42], \"|\", board[43], \"|\", board[44], \"|\", board[45], \"|\", board[46], \"|\", board[47])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t5 \", board[32], \"|\", board[33], \"|\", board[34], \"|\", board[35], \"|\", board[36], \"|\", board[37], \"|\", board[38], \"|\", board[39])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t4 \", board[24], \"|\", board[25], \"|\", board[26], \"|\", board[27], \"|\", board[28], \"|\", board[29], \"|\", board[30], \"|\", board[31])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t3 \", board[16], \"|\", board[17], \"|\", board[18], \"|\", board[19], \"|\", board[20], \"|\", board[21], \"|\", board[22], \"|\", board[23])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t2 \", board[8], \"|\", board[9], \"|\", board[10], \"|\", board[11], \"|\", board[12], \"|\", board[13], \"|\", board[14], \"|\", board[15])\n print(\"\\t \", \"---------------------------------------\")\n print(\"\\t1 \", board[0], \"|\", board[1], \"|\", board[2], \"|\", board[3], \"|\", board[4], \"|\", board[5], \"|\", board[6], \"|\", board[7])\n print(\"\\n\\t - A - B - C - D - E - F - G - H - \\n\")", "def draw(self,screen):\n for tile in self.tile_list:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n\n for tile in self.objList:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n # rectangle print for tiles", "def draw_board(self, board: BoardModel):\n self._picture=[]\n self._board = board\n self.delete(tk.ALL)\n\n for i in range(self._grid_size):\n for j in range(self._grid_size):\n char = self._board.get_game()[self.position_to_index((j, i), self._grid_size)]\n x1 = i * 60\n y1 = j * 60\n x2 = x1 + 60\n y2 = y1 + 
60\n\n #insert the image of unexposed cell\n if char == UNEXPOSED:\n photo=tk.PhotoImage(file=\"./images/unrevealed.gif\")\n\n\n # insert the image of exposed cell\n elif char == EXPOSED:\n photo=tk.PhotoImage(file=\"./images/zero_adjacent.gif\")\n\n # insert the image of cell of pokemon\n elif char == POKEMON:\n pokemon_list=[\"./images/pokemon_sprites/charizard.gif\",\n \"./images/pokemon_sprites/cyndaquil.gif\",\n \"./images/pokemon_sprites/pikachu.gif\",\n \"./images/pokemon_sprites/psyduck.gif\",\n \"./images/pokemon_sprites/togepi.gif\"]\n a=random.choice(pokemon_list)\n photo = tk.PhotoImage(file=a)\n print(a)\n\n # insert the image of cell of digit\n elif char.isdigit():\n if char==\"1\":\n photo = tk.PhotoImage(file=\"./images/one_adjacent.gif\")\n\n elif char==\"2\":\n photo = tk.PhotoImage(file=\"./images/two_adjacent.gif\")\n\n elif char==\"3\":\n photo = tk.PhotoImage(file=\"./images/three_adjacent.gif\")\n\n elif char==\"4\":\n photo = tk.PhotoImage(file=\"./images/four_adjacent.gif\")\n\n elif char==\"5\":\n photo = tk.PhotoImage(file=\"./images/five_adjacent.gif\")\n\n elif char==\"6\":\n photo = tk.PhotoImage(file=\"./images/six_adjacent.gif\")\n\n elif char==\"7\":\n photo = tk.PhotoImage(file=\"./images/seven_adjacent.gif\")\n\n else:\n photo = tk.PhotoImage(file=\"./images/eight_adjacent.gif\")\n\n # insert the image of pokeball\n elif char == FLAG:\n photo = tk.PhotoImage(file=\"./images/pokeball.gif\")\n\n self.create_image(x1 + 60 / 2, y1 + 60 / 2, image=photo)\n self._picture.append(photo)\n\n self.bind_clicks()", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def draw(self, window, color):\n rect = (self.row*self.size, self.col*self.size, self.size, self.size)\n pygame.draw.rect(window, color, rect)", "def print_board(self):\n div = int(math.sqrt(self.BoardSize))\n dash = \"\"\n space = \"\"\n line = \"+\"\n sep = \"|\"\n for i in range(div):\n dash += \"----\"\n space += \" \"\n for i in range(div):\n line += dash + \"+\"\n sep += space + \"|\"\n for i in range(-1, self.BoardSize):\n if i != -1:\n print \"|\",\n for j in range(self.BoardSize):\n if self.CurrentGameBoard[i][j] > 9:\n print self.CurrentGameBoard[i][j],\n elif self.CurrentGameBoard[i][j] > 0:\n print \"\", self.CurrentGameBoard[i][j],\n else:\n print \" \",\n if (j+1 != self.BoardSize):\n if ((j+1)//div != j/div):\n print \"|\",\n else:\n print \"\",\n else:\n print \"|\"\n if ((i+1)//div != i/div):\n print line\n else:\n print sep", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def draw( self ):\n\n if self.__drawnGrid == 0:\n draw_grid().draw()\n\n self.__drawnGrid = 1\n\n column = 0\n row = 0\n i = 0\n for mark in 
self.__grid:\n if row == 0:\n turtle.goto(-60+60*column, 60)\n elif row == 1:\n turtle.goto(-60+60*column, 0)\n elif row == 2:\n turtle.goto(-60+60*column, -60)\n\n if isinstance(mark, str):\n if mark.lower() == 'x': \n drawX(i)\n elif mark.lower() == 'o':\n drawO(i)\n\n column += 1\n\n if column == 3:\n column = 0\n row += 1\n\n i+=1\n\n turtle.goto(-60, 60)", "def showBoard(self):\n \n brd = \"\\n | | \\n\" + \\\n \" \" + self.squares[0] + \" | \" + self.squares[1] + \" | \" + self.squares[2] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[3] + \" | \" + self.squares[4] + \" | \" + self.squares[5] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[6] + \" | \" + self.squares[7] + \" | \" + self.squares[8] + \" \\n\" + \\\n \" | | \\n\"\n\n return brd", "def draw_app(self):\n \n # Start iterations\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n self.display.fill([255, 255, 255])\n self.grid.draw(self.display)\n pygame.display.update()", "def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)", "def graphic(board, player1=1, player2=2):\n width = board.width\n height = board.height\n\n print()\n print(player1, \"with X\".rjust(3))\n print(player2, \"with O\".rjust(3))\n print(' ', end='')\n for x in range(width):\n print(\"{0:4}\".format(x), end='')\n print()\n for i in range(height - 1, -1, -1):\n print(\"{0:4d}\".format(i), end='')\n for j in range(width):\n loc = i * width + j\n p = board.states.get(loc, -1)\n if p == player1:\n print('X'.center(4), end='')\n elif p == player2:\n print('O'.center(4), end='')\n else:\n print('_'.center(4), end='')\n print()", "def draw_board(self, dot_distance, dottie, height, width):\n for y in range(height):\n for i in range(width):\n dottie.dot()\n dottie.forward(dot_distance)\n dottie.backward(dot_distance * width)\n dottie.right(90)\n dottie.forward(dot_distance)\n dottie.left(90)", "def draw(self):\n self.screen_surf.fill(BKGD_COLOUR)\n self.all_tiles.draw(self.screen_surf) # Tiles before other sprites.\n self.nests.draw(self.screen_surf) # Nests before chipmunks.\n self.chipmunks.draw(self.screen_surf)\n self.acorns.draw(self.screen_surf)\n self.screen_surf.blit(self.acorn_surf, self.acorn_surf.get_rect())\n self.screen_surf.blit(self.timer_surf, self.timer_rect)", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def render(board):\n #The top of the board render, which gives enough space for each square based on max square value\n spot_length = len(str(len(board)))\n rv = '-'*((spot_length+1)*len(board))+'-\\n' \n\n #Create a print statement for each line\n for i in range(len(board)): \n line = '|' \n\n #For each number in line, add to print line \n for j in range(len(board[i])): \n spot = str(board[i][j]) \n #If spot is a zero, replace for visuals with '_' \n if spot == '0': \n spot = '_'*spot_length\n #Add number to print line with a separator '|'\n line += ' '*(spot_length-len(spot)) + spot + '|' \n\n #Print the rendered line\n rv += line + '\\n' \n\n #Print the bottom of the board\n rv += '-'*((spot_length+1)*len(board))+'-' \n return rv" ]
[ "0.83789307", "0.83610886", "0.8352866", "0.82823396", "0.8200601", "0.81451064", "0.8119161", "0.81146264", "0.7879229", "0.7845098", "0.7784387", "0.77669483", "0.7705313", "0.7694651", "0.763012", "0.7628809", "0.7627039", "0.76243454", "0.76111424", "0.75869983", "0.7550797", "0.7542716", "0.7528937", "0.7514778", "0.75008976", "0.74552107", "0.7449227", "0.7447887", "0.7438565", "0.7422395", "0.73912257", "0.7319168", "0.73184806", "0.7315631", "0.73034376", "0.72805834", "0.7230437", "0.72137344", "0.72102654", "0.7206736", "0.7197284", "0.7177305", "0.7172486", "0.71252775", "0.7124246", "0.71171254", "0.71161306", "0.71158445", "0.7115477", "0.71068877", "0.7096635", "0.7092485", "0.7087497", "0.7083897", "0.7077159", "0.7072464", "0.70644957", "0.7047579", "0.7045289", "0.70372003", "0.7006009", "0.69891906", "0.6984161", "0.6979546", "0.69737375", "0.6964751", "0.69584006", "0.69551754", "0.6948032", "0.69413996", "0.69396055", "0.69385886", "0.69384015", "0.69352764", "0.6926235", "0.6919774", "0.6910341", "0.691023", "0.6906562", "0.690293", "0.68934846", "0.6889509", "0.68707716", "0.6865573", "0.68614376", "0.68598676", "0.68473464", "0.6843028", "0.68399066", "0.683266", "0.6831457", "0.6829718", "0.6821909", "0.6820431", "0.68196684", "0.6818383", "0.6812343", "0.68104124", "0.680739", "0.6795078" ]
0.79026276
8
This method draws a piece of the right color at the right position.
def drawPiece(pos, color):
    if color == 0:
        color_piece = BGCOLOR
    elif color == 1:
        color_piece = BLACK
    elif color == 2:
        color_piece = WHITE
    elif color == 3:
        color_piece = LIGHTGREEN
    # draws a circle of the right color on the board
    pygame.draw.ellipse(DISPLAYSURF, color_piece,
                        [MARGINH + (pos[0]-1)*CASESIZE+4, MARGINV + (pos[1]-1)*CASESIZE+4,
                         CASESIZE-8, CASESIZE-8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draw_square(self, left_x, top_y, side, color, fill):\n self.pen.up()\n self.pen.color(color)\n self.pen.goto(left_x, top_y)\n self.pen.down()\n self.pen.begin_fill()\n for _ in range(4):\n self.pen.forward(side)\n self.pen.right(90)\n self.pen.end_fill()", "def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()", "def right():\n global x, canvas # x é modificado\n canvas.create_line(x, y, x + 10, y)\n x += 10", "def draw(self, color, x, y) -> None:\n y_pos = y * self.scale\n x_pos = x * self.scale\n pg.draw.rect(self.screen, color, [x_pos, y_pos, self.scale-self.offset, self.scale-self.offset])", "def drawRect(self,color,x1,y1,x2,y2):\n if not self.changed: self.edit()\n wmap = 512\n mapd = self.mapd\n for y in xrange(y1,y2):\n ymoff = wmap*y\n for x in xrange(x1,x2):\n mapd[x+ymoff] = color", "def create_right_end(self, pos):\n return Hook(b2Vec2(*pos) + b2Vec2(0.6, 0))", "def draw_block(position, color):\n x = position.col*DX+DX+2\n y = position.row*DY+DY+2\n width = DX-4\n height = DY-4\n pygame.draw.rect(screen, color, (x,y,width,height), 0)", "def draw_right_hand_box(data):\n box = data['rhb'][data['i']];\n return draw_hand_box(data,box,[0,0,255]);", "def draw_rect(self, i, j, col, d=0):\n pygame.draw.rect(self.screen, col, self.get_rect(i, j), d)", "def draw(self, color = Color.GREEN):\n self.image[self.x, self.y] = color", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw(self, binary, leftx, rightx):\n\t\tfilled = np.zeros_like(binary)\n\t\tploty = np.linspace(0, filled.shape[0] - 1, filled.shape[0])\n\t\t# Recast the x and y points into usable format for cv2.fillpoly()\n\t\tpts_left = np.array([np.transpose(np.vstack([leftx, ploty]))])\n\t\tpts_right = np.array([np.flipud(np.transpose(np.vstack([rightx, ploty])))])\n\t\tpts = np.hstack((pts_left, pts_right))\n\t\t# Draw the lane onto the warped blank image\n\t\tcv2.fillPoly(filled, np.int_([pts]), (0, 255, 0))\n\t\treturn filled", "def draw():", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def draw_square(self, surface, color, position):\n rect = pygame.Rect(position, (50, 50))\n pygame.draw.rect(surface, color, rect)", "def draw_piece(self):\n self.screen.blit(self.image, self.rect)", "def drawRect (self, x, y, w, h, colour):\r\n for i in range (y,y+h):\r\n row = self.image [i]\r\n\r\n for j in range (x,x+w):\r\n row [j] = colour", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw_brick(self, x, y):\n pygame.draw.rect(self.main_surface, self.color, (x, y, self.width, self.height), 0)\n pygame.display.update()", "def draw(self):\n res = ''\n # ANSI code to clear the screen\n #res += chr(27) + \"[2J\"\n for position, value in enumerate(self.board.tttboard):\n if value is None:\n res += str(position)\n #sys.stdout.write(str(position))\n else:\n res += str(value)\n #sys.stdout.write(str(value))\n\n if (position + 1) % 3 != 0:\n res += str('|')\n #sys.stdout.write('|')\n else:\n #print ''\n\n res += str('\\n')\n if position == 2 or position == 5:\n #print '-' * 5\n\n res += '-' * 5\n res += str('\\n')\n return 
res", "def draw(self, screen, y, invert_color=False, fill_character=None):\n self.screen = screen\n string = self.generate_string()\n move(screen, y, 0)\n screen.clrtoeol()\n if fill_character:\n _, screen_cols = getmaxyx(screen)\n string = string.ljust(screen_cols, fill_character)\n if invert_color:\n screen.insstr(y, 0, string, curses.A_REVERSE)\n else:\n screen.insstr(y, 0, string)", "def render_wall(win, color, direction, pos):\n x, y = pos\n\n if direction == 'S':\n width = CELL_SIZE\n height = BORDER\n x = x*CELL_SIZE\n y = (y+1)*CELL_SIZE\n\n elif direction == 'E':\n width = BORDER\n height = CELL_SIZE\n x = (x+1)*CELL_SIZE\n y = y*CELL_SIZE\n\n pygame.draw.rect(win, color, (x, y, width, height))", "def drawRectangle(width, height, tilt, penColor, fillColor):\n Lucia.color(penColor,fillColor)\n Lucia.seth(tilt)\n Lucia.begin_fill()\n for i in range(2):\n Lucia.forward(width)\n Lucia.left(90)\n Lucia.forward(height)\n Lucia.left(90)\n Lucia.end_fill()", "def draw(self, window, color):\n rect = (self.row*self.size, self.col*self.size, self.size, self.size)\n pygame.draw.rect(window, color, rect)", "def draw_rect(self, color, position, size, border_width = 0, anchor= 'topleft'):\n # We'll try to make sure that everything is okay later\n \n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor, size)\n pygame.draw.rect(self._surf, color, (position + offset, size), border_width)", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def DrawPanel(screen, panel_x, panel_y, panel_len, panel_width):\n pygame.draw.rect(screen, (255,0,0),(panel_x, panel_y, panel_len*2, panel_width*2), 4)", "def paint(self, draw, x, y, w, h):\n\t\tpass", "def draw_rectangle(img, st_pos=(0, 0), ed_pos=(640-1, 480-1),\n color=(1.0, 1.0, 0.0)):\n\n # convert `color` from float to int.\n max_value = np.iinfo(img.dtype).max\n color_val = np.array(color)\n color_val = np.round(color_val * max_value).astype(img.dtype)\n\n # conposite!\n st_h, st_v = st_pos\n ed_h, ed_v = ed_pos\n img[st_v, st_h:ed_h, :] = color_val\n img[ed_v, st_h:ed_h, :] = color_val\n img[st_v:ed_v, st_h, :] = color_val\n img[st_v:ed_v, ed_h, :] = color_val", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def fill_draw(self):\n self.draw = [x + str(y) for x in COLOR for y in CARD_VALUE]", "def rightKey(xCur, yCur, offset, g, currentOne, currentColor):\n if createConfiguration(xCur + 1, yCur, offset, g, currentOne):\n xCur += 1\n else:\n createConfiguration(xCur, yCur, offset, g, currentOne)\n return xCur, yCur, currentOne, currentColor, False, g, offset", "def draw(self,renderer,dx,dy):\n for i in self.itemType.find('display'):\n if i.tag == 'rect':\n colors = i.find('color').text[1:-1].split(',')\n SDL_SetRenderDrawColor(renderer,int(colors[0]),int(colors[1]),int(colors[2]),int(colors[3]) if len(colors) > 3 else 255)\n rect = SDL_Rect()\n rect.x, rect.y = self.getPos()\n rect.x, rect.y = rect.x+dx,rect.y+dy\n rect.w, rect.h = self.getSize()\n SDL_RenderFillRect(renderer,rect)", "def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not 
self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True", "def _draw_red(self, intrusion):\n\n\t\tif intrusion is None:\n\t\t\treturn\n\n\t\tif intrusion not in self.POSSIBLE_INTRUSION_LEVELS:\n\t\t\traise ValueError(\"Given value [{}] for argument \\\"intrusion\\\" is invalid\".format(intrusion))\n\n\t\tfrom_point = Point(0, 0)\n\t\tto_point = Point(0, 0)\n\t\tcolour = Rgb()\n\n\t\tassert(len(self.POSSIBLE_INTRUSION_LEVELS) == 3)\n\n\t\t# Easy: 40 % strong_red / 316 * 316\n\t\tif intrusion == self.POSSIBLE_INTRUSION_LEVELS[0]:\n\t\t\tfrom_point = Point(92, 92)\n\t\t\tto_point = Point(407, 407)\n\t\t\tcolour = Rgb.strong_red()\n\t\t# Medium: 20 % med_red / 224 * 224\n\t\telif intrusion == self.POSSIBLE_INTRUSION_LEVELS[1]:\n\t\t\tfrom_point = Point(138, 138)\n\t\t\tto_point = Point(361, 361)\n\t\t\tcolour = Rgb.med_red()\n\t\t# Hard: 5 % light_red / 112 * 112\n\t\telif intrusion == self.POSSIBLE_INTRUSION_LEVELS[2]:\n\t\t\tfrom_point = Point(194, 194)\n\t\t\tto_point = Point(305, 305)\n\t\t\tcolour = Rgb.light_red()\n\t\telse:\n\t\t\traise NotImplementedError(\"draw_red: Intrusion level not implemented\")\n\n\t\t# TODO TEMP Currently intruded means ALL is red!\n\t\tfrom_point = Point(0, 0)\n\t\tto_point = Point(499, 499)\n\n\t\tself._draw_area(colour, from_point, to_point)", "def DrawSegment(self, p1, p2, color):\r\n pygame.draw.aaline(self.surface, color.bytes, p1, p2)", "def drawBorder(self,color,x1,y1,x2,y2,thick):\n self.drawRect(color,x1,y1,x2,y1+thick)\n self.drawRect(color,x1,y1,x1+thick,y2)\n self.drawRect(color,x2-thick,y1,x2,y2)\n self.drawRect(color,x1,y2-thick,x2,y2)", "def draw_rectangle(self, color, left_angle: Point, width, height):\n _lta = self.T.itrans(left_angle)\n pg.draw.rect(self.screen, color, (_lta()[0], _lta()[1], width, height))", "def go_right(self):\n self.rect.centerx += 9", "def draw_pixel(x, y, col):\n unicornhathd.set_pixel(x, 12 - y, col[0], col[1], col[2])", "def draw(self, cr, width, height):\n cr.set_source_rgb(0, 0, 0)\n cr.rectangle(0, 0, width, height)\n cr.fill()", "def render(self, context):\n pygame.draw.rect(context, (255, 0, 0), self.box)", "def fill(self, color):", "def drawTriangle(t, color, x, y):\n ## t.color(color)\n ## t.begin_fill()\n for i in range(3):\n t.forward(x)\n t.right(y)", "def draw_mirrored_cell(self, pos, color):\n self.hexes.set_cells(helpfunc.mirror_hexes(pos), color)", "def render(self, game):\n pygame.draw.rect(game.screen,\n self.colour,\n (int(self.x), int(self.y), self.a, self.b))", "def draw_pavement():\n\n roberto.penup()\n roberto.goto(-345, -100)\n roberto.pendown()\n roberto.begin_fill()\n for i in range(4): # this loop draws a big black rectangle that is positioned at the bottom part of the screen\n roberto.forward(684)\n roberto.right(90)\n roberto.end_fill()", "def draw_rect(r):\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)", "def draw_rect(r):\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)", "def __drawSegment(self, p1, p2, color):\n pygame.draw.aaline(self.screen, color, p1, p2)", "def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n 
pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def draw_pointer(self, cr, newx, newy):\n data = [(self.oldx, self.oldy, 15, (0, 0, 0)),\n (newx, newy, 10, (.8, .8, .8))]\n for (x, y, radius, (r, g, b)) in data:\n if x and y:\n cr.set_source_rgb(r, g, b)\n cr.arc(x, y, radius, 0, 2 * pi)\n cr.fill()", "def draw_rhombus(self, screen):\n pygame.gfxdraw.filled_polygon(screen, self.list_of_coordinates, self.color)\n\n return screen", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def draw_rectangle(t, w, h):\r\n for i in range(2):\r\n t.forward(w)\r\n t.left(90)\r\n t.forward(h)\r\n t.left(90)", "def draw(self, x, y, dx, dy, color):\n\n draw = ImageDraw.Draw(self.image)\n\n draw.rectangle([(x,y),(dx,dy)], color, outline=None)", "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "def draw_circle(self, x0, y0, r, color=None):\n f = 1 - r\n ddF_x = 1\n ddF_y = -2 * r\n x = 0\n y = r\n\n self.set(x0, y0 + r, color)\n self.set(x0, y0 - r, color)\n self.set(x0 + r, y0, color)\n self.set(x0 - r, y0, color)\n\n while x < y:\n if f >= 0:\n y -= 1\n ddF_y += 2\n f += ddF_y\n x += 1\n ddF_x += 2\n f += ddF_x\n\n self.set(x0 + x, y0 + y, color)\n self.set(x0 - x, y0 + y, color)\n self.set(x0 + x, y0 - y, color)\n self.set(x0 - x, y0 - y, color)\n self.set(x0 + y, y0 + x, color)\n self.set(x0 - y, y0 + x, color)\n self.set(x0 + y, y0 - x, color)\n self.set(x0 - y, y0 - x, color)", "def draw_closed(x, y):\n square_pos_x = x * 30\n square_pos_y = (y - 1) * -30\n penup()\n # Sets the position on the position (15, 25) in the square of size (30,30) and draws a filled circle\n setpos(-500 + square_pos_x + 15, 200 + square_pos_y - 25)\n pendown()\n fillcolor('#ff9800')\n begin_fill()\n circle(10)\n end_fill()", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def check_right(self, i, j, board):\r\n color = board.pieceAt(i, j)\r\n\r\n row = True\r\n for k in range(4):\r\n row &= board.pieceAt(i, j+k) is color\r\n\r\n if row:\r\n return color\r\n else:\r\n return Color.EMPTY", "def make_empty_right(self, e=0):\n self.make_empty_side(u'right')", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n 
(self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def draw_hex(length, color):\n turtle.color(color)\n turtle.begin_fill()\n for x in range(6):\n turtle.forward(length)\n turtle.right(60)\n turtle.end_fill()", "def fill(self, x, y, width=None, height=None, char=None,\n fg=(255, 255, 255), bg=None):\n self.console.draw_rect(x, y, width, height, char, fg, bg)", "def _draw(self):\n\n pygame.draw.circle(self.display, self.colour,\n self.pos.astype(int),\n self.rad)\n pygame.draw.circle(self.display, black,\n self.pos.astype(int),\n self.rad, 2)", "def draw_right_poly(surface, color, n, position, radius, rotation=0):\n x0, y0 = position\n\n angle_step = 2 * math.pi / n\n angle0 = math.radians(rotation) - angle_step / 2 # with this minus the symmetry is being kept\n\n vertices = [\n (int(x0 + radius * math.sin(angle0 + i * angle_step)), int(y0 + radius * math.cos(angle0 + i * angle_step)))\n for i in range(n)]\n\n draw.polygon(surface, color, vertices)", "def draw(self, screen, offsets: tuple):\r\n pass", "def draw_house_roof(x, y, width, height):\n print('Типа рисую крышу...', x, y, width, height)", "def draw_square(self, x, y, color):\n return self.canvas.create_rectangle(x * self.scale, y * self.scale, \\\n (x + 1) * self.scale, (y + 1) * self.scale, fill = color)", "def move_r(self):\r\n self.image = pygame.transform.scale(pygame.image.load_extended(\"images/playerImage_right.png\").convert_alpha(),\r\n (50, 50))\r\n self.image.set_colorkey((255, 255, 255))\r\n self.rect.x += self.speed\r\n self.lastMove = 'right'\r\n pygame.time.delay(10)", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()", "def draw(self):", "def update(self): # called to update this piece's position\r\n \r\n if self.name == \"white\" and self.y == 25: self.crowned()\r\n elif self.name == \"black\" and self.y >= 350: self.crowned()\r\n self.draw()", "def do_draw(self, reg0, reg1, nibble):\n assert self.safe_register(reg0), f\"register error [{reg0}]\"\n assert self.safe_register(reg1), f\"register error [{reg1}]\"\n assert self.safe_value(nibble), f\"value error [{nibble}]\"\n _x = self.registers[reg0]\n _y = self.registers[reg1]\n #print(\"drw\", _x, _y)\n for i in range(nibble):\n for j in range(8):\n _sprite = self.memory[self.address_register+i]\n if ((_sprite >> (8-j-1)) & 0x0001) == 1:\n if self.screen_buff[_x+j][_y+i] == 1:\n # COLLISION\n self.registers[15] = 1\n self.screen_buff[_x+j][_y+i] = 0\n else:\n self.screen_buff[_x+j][_y+i] = 1", "def draw(self, screen):", "def update_r(color, new_r):\n\n color.update_r(new_r)", "def gradientRect( window, left_colour, right_colour, target_rect ):\n colour_rect = pygame.Surface( ( 2, 2 ) ) # tiny! 
2x2 bitmap\n pygame.draw.line( colour_rect, left_colour, ( 0,0 ), ( 0,1 ) ) # left colour line\n pygame.draw.line( colour_rect, right_colour, ( 1,0 ), ( 1,1 ) ) # right colour line\n colour_rect = pygame.transform.smoothscale( colour_rect, ( target_rect.width, target_rect.height ) ) # stretch!\n window.blit( colour_rect, target_rect ) # paint it", "def fill(self, colour: int, /) -> None:", "def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))", "def draw(self, x, y, char=None, fg=(255, 255, 255), bg=None):\n self.console.draw_char(x, y, char, fg, bg)", "def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def draw_pixel_to_display(self):\n register = self.return_middle_registers(self.opcode)\n x = self.registers[register[0]]\n y = self.registers[register[1]]\n height = self.opcode & 0xF\n\n self.registers[0xF] = 0\n\n x = bit_utils.wrap_around(x, self.display.width)\n y = bit_utils.wrap_around(y, self.display.height)\n\n for yline in range(0, height):\n pixels = self.memory[self.I + yline]\n y1 = bit_utils.wrap_around(y + yline, self.display.height)\n for xline in range(0, 8):\n x1 = bit_utils.wrap_around(x + xline, self.display.width)\n if pixels & (0x80 >> xline) != 0:\n if self.display.set_pixel(x1, y1):\n self.registers[0xF] = 1\n\n self.display.draw_flag = True\n logger.info(\"Drawing sprite from {} to {} at {}, {}\".format(\n hex(self.I),\n hex(self.I + height),\n x, y))", "def DrawTurret(self):\n pygame.draw.rect(self.displaysurf, self.color, (int(self.x_coord - T_W1 / 2), WINHEIGHT - T_H1 - GR_HEIGHT, T_W1, T_H1), 0)\n pygame.draw.rect(self.displaysurf, self.color, (int(self.x_coord - T_W2 / 2), WINHEIGHT - (T_H2 + T_H1) - GR_HEIGHT, T_W2, T_H2), 0)\n self.barrel_endx = self.x_coord - int(T_LEN*(math.cos(self.barrel)))\n self.barrel_endy = WINHEIGHT - T_H1 - int(T_LEN*(math.sin(self.barrel))) - GR_HEIGHT\n pygame.draw.line(self.displaysurf, self.color, (self.x_coord, WINHEIGHT - T_H1 - GR_HEIGHT), (self.barrel_endx, self.barrel_endy), T_WID)", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def rightChild(self, pos):\n return (2 * pos) + 1", "def rightChild(self, pos):\n return (2 * pos) + 1", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def set_pixel(self, pos, color):\n if pos[0] >= 0 and pos[0] < self.width 
and pos[1] >= 0 and pos[1] < self.height:\n # Ensure that the y axis increases upwards\n inv_y = self.height - 1 - pos[1]\n pos = (inv_y * self.width * 3) + (pos[0] * 3)\n self.data[pos + 0] = color[0]\n self.data[pos + 1] = color[1]\n self.data[pos + 2] = color[2]", "def _draw_hex(self, coord, color):\n deltas = [(0,0), (self.HEX_WIDTH // 4, self.HEX_WIDTH // 2),\n (self.HEX_WIDTH // 2, 0), \n (self.HEX_WIDTH // 4, -self.HEX_WIDTH // 2),\n (-self.HEX_WIDTH // 4, -self.HEX_WIDTH // 2),\n (-self.HEX_WIDTH // 2, 0)]\n\n points = [coord]\n x, y = coord\n for dx, dy in deltas:\n x += dx\n y += dy\n points.append((x, y))\n \n self.canvas.create_polygon(points, outline=self.HEX_BORDER_COLOR, \n fill=color, width=self.HEX_BORDER)", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)", "def right(self):\n self.move(1,0)", "def needle(self, angle, r1, r2, width, height, ray, ctx, color=\"#000000\"):\n x1 = width / 2 - ray * cos(angle) * r1\n y1 = height / 2 - ray * sin(angle) * r1\n x2 = width / 2 + ray * cos(angle) * r2\n y2 = height / 2 + ray * sin(angle) * r2\n ctx.beginPath()\n ctx.strokeStyle = self.digits_color_1\n ctx.moveTo(x1, y1)\n ctx.lineTo(x2, y2)\n ctx.stroke()", "def _right(self, index):\r\n return 2*index + 2" ]
[ "0.6624936", "0.65994567", "0.65606534", "0.6394199", "0.63757133", "0.6260754", "0.6213263", "0.62062967", "0.6189885", "0.6115182", "0.60815096", "0.60737467", "0.60621893", "0.6014855", "0.6011095", "0.59758043", "0.5940978", "0.5921333", "0.59145665", "0.5895593", "0.5887118", "0.5873707", "0.5862061", "0.58453", "0.58365184", "0.5781303", "0.57804984", "0.5770809", "0.57698154", "0.5765765", "0.57521504", "0.5746531", "0.5742022", "0.5738157", "0.57366246", "0.5728554", "0.57269686", "0.57161635", "0.57152355", "0.57145315", "0.5711761", "0.57102585", "0.5698212", "0.5695751", "0.5689257", "0.56851745", "0.5679913", "0.56749266", "0.56749266", "0.5674562", "0.567254", "0.5654438", "0.56495345", "0.56491363", "0.5647551", "0.563119", "0.5610938", "0.5591838", "0.559022", "0.5587596", "0.5573342", "0.5571987", "0.5562191", "0.55608493", "0.5558562", "0.5558141", "0.555793", "0.55507314", "0.5550143", "0.5547173", "0.55443895", "0.55296385", "0.55221766", "0.55211794", "0.5512475", "0.551028", "0.5509488", "0.5507498", "0.5506331", "0.5501326", "0.5497281", "0.54899716", "0.5484263", "0.5470643", "0.54699993", "0.546953", "0.5457417", "0.54558897", "0.54553574", "0.5453763", "0.5452702", "0.5452702", "0.5442721", "0.54426396", "0.5437003", "0.5423466", "0.5422912", "0.5421458", "0.5414988", "0.54141176" ]
0.62434816
6
This function takes a mouse position and returns the position of the box the click is in, or an empty tuple if it is not in the board
def isInBoard(posx, posy): #is pos in the board
    if posx >= MARGINH and posx <= MARGINH + (BOARDSIZE)*CASESIZE and posy >= MARGINV and posy <= MARGINV + (BOARDSIZE)*CASESIZE: #transform it in case coordinates
        casex = int((posx - MARGINH)/CASESIZE) + 1
        casey = int((posy - MARGINV)/CASESIZE) + 1
        return (casex,casey)
    else: # return empty tuple because pos is not in board
        return ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mouse_on_grid(self):\n if self.mousePos[0] < grid_pos[0] or self.mousePos[1] < grid_pos[1]:\n return None\n if self.mousePos[0] > grid_pos[0] + grid_size or self.mousePos[1] > grid_pos[1] + grid_size:\n return None\n temp = (self.mousePos[0] - grid_pos[0]\n ) // cell_size, (self.mousePos[1] - grid_pos[1]) // cell_size\n if not self.check_if_locked(temp):\n return temp", "def clickCell(self, event):\n position = self.input.checkMouseInput(event)\n if not position:\n return None\n x = math.floor(position[0] / self.imageWidth)\n y = math.floor(position[1] / self.imageHeight)\n return (int(x), int(y))", "def get_mouse_coordinate(self):\n pos = pygame.mouse.get_pos()\n mov = pygame.mouse.get_rel()\n row = pos[0] // (self.CELL_WIDTH + self.MARGIN)\n col = (pos[1] - self.PANEL_HEIGHT) // (self.CELL_WIDTH + self.MARGIN)\n if mov != (0, 0) and not self.env.not_in_grid(row, col):\n return (row, col)\n return self.markerPos", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def click(self, x, y):\n row = int((x - self.x)/self.cell_size)\n col = int((y - self.y)/self.cell_size)\n if 0 <= row < ROWS and 0 <= col < COLS:\n return row, col\n return None", "def grid_to_mouse( pos ):\n ix,iy=pos\n px= ix*CELLSIZE + H_CELLSIZE + ix*CELLGAP\n py= iy*CELLSIZE + H_CELLSIZE + iy*CELLGAP\n return (px,py)", "def find_boxes_under_coord(self,x,y):\n\t\treturn tuple(r[self.box_col] for r in self.model if rect_contains(r[self.box_col].rect,x,y))", "def check():\n mouse = pygame.mouse.get_pos()\n\n # mouse[0] = x-coordinate of mouse position.\n # mouse[1] = y-coordinate of mouse position.\n if box1.x + box1.size > mouse[0] > box1.x and box1.y + box1.size > mouse[1] > box1.y:\n return True\n elif box2.x + box2.size > mouse[0] > box2.x and box2.y + box2.size > mouse[1] > box2.y:\n return False\n return None", "def tile_clicked(position):\n\n # retrieve tile index\n for i in range(0, len(BOARD)):\n for j in range(0, len(BOARD[i])):\n if BOARD[i][j].collidepoint(position):\n return [i, j]\n \n return False", "def mouse_to_grid( pos ):\n mx,my=pos\n # account for window border and gap between cells\n ix = int((mx-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n iy = int((my-H_CELLSIZE)/(CELLSIZE+CELLGAP))\n # force respect window borders\n if ix<0 or ix>=GRID_X or iy<0 or iy>=GRID_Y:\n return None\n else:\n return (ix,iy)", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def convert_mousepos(self, pos):\n tokenx, tokeny = pos\n row = int((tokenx - self.x_margin) / SPACESIZE)\n column = int((tokeny - self.y_margin) / SPACESIZE)\n return column, row", "def checkMouse(self):\n if self.isClosed():\n raise GraphicsError, \"checkMouse in closed window\"\n _tkCall(self.update)\n if self.mouseX != None and self.mouseY != None:\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)\n else:\n return None", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def return_cell(self):\n\n pos = pygame.mouse.get_pos()\n\n x = pos[1] // (self.cell_size+1)\n y = pos[0] // (self.cell_size+1)\n\n return self.grid[x][y]", "def test_if_row_col_well_retrieved_from_mouse_pos(self):\n ui = UIRender(TestUI.image_path)\n row, col = 
ui.get_row_col_from_mouse((10,25))\n self.assertEqual(row, 0)\n self.assertEqual(col, 0)", "def __get_box(self, position):\n return self.__board[position//self.__length][position%self.__length]", "def checkMouse(self):\n if self.isClosed():\n raise GraphicsError(\"checkMouse in closed window\")\n self.update()\n if self.mouseX != None and self.mouseY != None:\n x,y = self.mouseX, self.mouseY\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)\n else:\n return None", "def click(self, mousepos):\n if self.currentplayer == 1:\n if isinstance(self.player1, Computer):\n return\n else:\n self.player1.move = self.board.findClickedBox(mousepos)\n if self.currentplayer == -1:\n if isinstance(self.player2, Computer):\n return\n else:\n self.player2.move = self.board.findClickedBox(mousepos)", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def mousePosition(self):", "def get_clicked_tile(self, x: int, y: int) -> Optional[Point]:\n\t\ttile_x = x//(self.canvas_width//self.board_size)\n\t\ttile_y = y//(self.canvas_height//self.board_size)\n\n\t\tif tile_x < 0 or tile_x >= self.board_size or tile_y < 0 or tile_y >= self.board_size:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn Point(tile_x, tile_y)", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j", "def get_element(mouse): # pylint: disable=inconsistent-return-statements\n point = wtl.Point(mouse.x - 5, mouse.y - 5)\n\n with data_lock:\n if not current_view:\n return\n\n smallest_element, smallest_area = None, 999999\n for e in current_view.snapshot.elements:\n if point in e.bounds and e.bounds.area < smallest_area:\n smallest_area, smallest_element = e.bounds.area, e\n\n return smallest_element", "def get_mouse():\n if CONST.render == 'sfml':\n mouse_pos = Vector2(sfml.Mouse.get_position())/engine.screen_diff_ratio+engine.get_origin_pos()\n return mouse_pos,\\\n [sfml.Mouse.is_button_pressed(sfml.Mouse.LEFT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.RIGHT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.MIDDLE)]\n elif CONST.render == 'pookoo':\n return Vector2(pookoo.input.mouse.position()), [\n False,False,False\n ]\n elif CONST.render == 'kivy':\n return Vector2(), [False,False,False]", "def mouse_coords(desktop=False):\n x, y = c_int(0), c_int(0)\n if desktop:\n mouse.SDL_GetGlobalMouseState(byref(x), byref(y))\n else:\n mouse.SDL_GetMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def getMouse():\n return pygame.mouse.get_pos()", "def get_coordinates(button):\n global buttons\n for x in range(len(buttons)):\n for y in range(len(buttons[x])):\n if buttons[x][y] == button:\n return x, y", "def getupperleft(self):\n return (self.rect.x, self.rect.y)", "def find_coords_of_selected_sq(self, evt):\n # saves row and col tuple into two variables\n column, row = self.get_row_col(evt)\n # normalize for all square size by keeping the floor\n column_floor, row_floor = self.floor_of_row_col(column, row)\n\n corner_column = (column_floor * self.sq_size) + self.sq_size\n corner_row = (row_floor * self.sq_size) + self.sq_size\n return corner_column, corner_row", "def check_click(self, mouse_x, mouse_y):\r\n # Change the x/y screen coordinates to grid coordinates\r\n column = mouse_x // 
70\r\n row = mouse_y // 70\r\n\r\n if row in [0, 9] or column in [0, 9]:\r\n self.shoot_ray(row, column)\r\n elif 0 < row < 9 and 0 < column < 9:\r\n self.guess_atom(row, column)", "def read_current_mouse_position():\n import pyautogui\n pyautogui.FAILSAFE = False\n return pyautogui.position()", "def get_hit_box(self):\n hit_box = self.player_image.get_rect()\n hit_box[0] = self.player_position[\"x\"]\n hit_box[1] = self.player_position[\"y\"]\n return hit_box", "def getCursorPosition(x, y, box, thickness=1):\n x0, y0, x1, y1 = boxToExtent(box)\n w, h = box[2], box[3]\n delta = thickness - 1\n p = None\n\n if pointInBox(x, y, (x0, y0, thickness, thickness)):\n p = 0\n elif pointInBox(x, y, (x1-delta, y0, thickness, thickness)):\n p = 2\n elif pointInBox(x, y, (x1-delta, y1-delta, thickness, thickness)):\n p = 4\n elif pointInBox(x, y, (x0, y1-delta, thickness, thickness)):\n p = 6\n elif pointInBox(x, y, (x0+thickness, y0, w-(thickness*2), thickness)):\n p = 1\n elif pointInBox(x, y, (x1-delta, y0+thickness, thickness, h-(thickness*2))):\n p = 3\n elif pointInBox(x, y, (x0+thickness, y1-delta, w-(thickness*2), thickness)):\n p = 5\n elif pointInBox(x, y, (x0, y0+thickness, thickness, h-(thickness*2))):\n p = 7\n\n return p", "def get_boxes_under_cursor(self,x=None,y=None):\n\t\tif x is None or y is None:\n\t\t\tx,y,_ = self.window.get_pointer()\n\t\tif not self._boxes_under_cursor or not self._changed_rect:\n\t\t\t# It doesn't matter if these are way off: if the mouse is outside \n\t\t\t# the cache box, it'll be recalculated.\n\t\t\tx,y = self.widget2imgcoords(x,y)\n\t\t\tself._update_boxes(x,y)\n\t\treturn self._boxes_under_cursor[:]", "def getlefttop(self,xnum,ynum):\n left = self.xmargin + xnum*CELLSIZE\n top = self.ymargin + ynum*CELLSIZE\n return (left,top)", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def get_mouse_pos(new_x_coord, new_y_coord):\n\n x_change = 0\n y_change = 0\n \n # if the joystick returned to its default position (0,0), stop mouse movement\n if not (new_x_coord == 0 and new_y_coord == 0):\n if new_x_coord == 0:\n x_change = 0\n else:\n x_change = new_x_coord\n\n if new_y_coord == 0:\n y_change = 0\n else:\n y_change = -new_y_coord\n \n return (int(x_change), int(y_change))", "def mouse_position_event(self, x: int, y: int):\n pass", "def xy_to_element(self, pos: tuple) -> tuple:\n element_pos_x = pos[0] - c.SCREEN_PADDING\n if pos[1] < c.CANVAS_START_Y:\n screen_element = self.menu\n element_pos_y = pos[1] - c.SCREEN_PADDING\n else:\n screen_element = self.grid\n element_pos_y = pos[1] - (c.CANVAS_START_Y)\n return (screen_element, (element_pos_x, element_pos_y))", "def random_position(self):\n while True:\n h = random.randrange(0, self.height)\n w = random.randrange(0, self.width)\n if self.grid[h, w] == 0:\n return (h, w)", "def _mouse_action(self, pos, pygame):\r\n surface = pygame.display.get_surface()\r\n\r\n width = surface.get_width()\r\n height = surface.get_height()\r\n # get window size\r\n\r\n button_width = width / 5\r\n button_height = height / 6\r\n # calculate button size\r\n\r\n pixel_x, pixel_y = pos\r\n # get user interact position\r\n\r\n # check which button that user interact\r\n # all the conditional statements deal with what the user selects\r\n # on the screen. 
There are 25 buttons and hence that many conditional\r\n # statements\r\n if 0 < pixel_x < button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[0]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[1]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[2]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[3]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n button_height < pixel_y < 2 * button_height:\r\n return self.list[4]\r\n elif 0 < pixel_x < button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[5]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[6]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[7]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[8]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 2 * button_height < pixel_y < 3 * button_height:\r\n return self.list[9]\r\n elif 0 < pixel_x < button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[10]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[11]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[12]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[13]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 3 * button_height < pixel_y < 4 * button_height:\r\n return self.list[14]\r\n elif 0 < pixel_x < button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[15]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[16]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[17]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[18]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 4 * button_height < pixel_y < 5 * button_height:\r\n return self.list[19]\r\n elif 0 < pixel_x < button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[20]\r\n elif button_width < pixel_x < 2 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[21]\r\n elif 2 * button_width < pixel_x < 3 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[22]\r\n elif 3 * button_width < pixel_x < 4 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[23]\r\n elif 4 * button_width < pixel_x < 5 * button_width and\\\r\n 5 * button_height < pixel_y < 6 * button_height:\r\n return self.list[24]", "def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)", "def find_valid_position(self, position: pygame.math.Vector2) -> bool:\n\n window_rect = 
self.ui_manager.get_root_container().rect\n\n if window_rect.contains(pygame.Rect(int(position[0]), int(position[1]), 1, 1)):\n self.rect.left = int(position.x)\n self.rect.top = int(position.y + self.hover_distance_from_target[1])\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n if self.rect.bottom > window_rect.bottom:\n self.rect.bottom = int(position.y - self.hover_distance_from_target[1])\n if self.rect.right > window_rect.right:\n self.rect.right = window_rect.right - self.hover_distance_from_target[0]\n if self.rect.left < window_rect.left:\n self.rect.left = window_rect.left + self.hover_distance_from_target[0]\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"Unable to fit tool tip on screen\")\n return False\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"initial position for tool tip is off screen,\"\n \" unable to find valid position\")\n return False", "def _get_position_grid_column(position, grid_row):\n \n for (box, grid_col_index) in zip(grid_row, range(len(grid_row))):\n if box.contains_point((position.x, position.y)):\n return grid_col_index\n return None", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def get_row_col(mouse_x, mouse_y):\n # Note: the top row is row=0 (bottom row=2), left col is col=0 (right col=2)\n spacing_x = 86 + 8\n spacing_y = 98 + 5\n top_y = 50\n left_x = 50\n return (mouse_y - top_y) // spacing_y, (mouse_x - left_x) // spacing_x", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position 
=(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def handle_click_event(grid, initial_number):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n x, y = mouse_x // CELL_SIZE, mouse_y // CELL_SIZE\n cell = grid.get_cell(x, y)\n\n if not cell.is_empty():\n number = cell.get_content().get_n()\n select_player(number)\n return number\n else:\n return initial_number", "def check_contained(self,x,y):\n if self.active:\n self.reset()\n #if in horizontal bounds\n if x > self.left and x < self.right:\n slope = 1/sqrt(3)\n #use to set create verticle bounds\n if x - self.center_x <= 0:\n slope *= -1\n\n ################\n x_rel = x - self.center_x #bounds depends on x location of the mouse \n bottom_bound = self.bottom - (x_rel*slope)\n top_bound = self.top - (x_rel*-slope)\n ################\n\n if y >= top_bound and y <= bottom_bound:\n if Ctrl_Vars.Left_MouseDown:\n self.press() # if all conditions are met use functionality", "def topleft(self, x=0, y=0):\n topleft = self.rect.topleft\n if x or y:\n return (topleft[0]+x, topleft[1]+y)\n return topleft", "def get_pos(self) -> tuple:\n return self.rect.center", "def mouse_click(self,event):\n global drag_sq\n# print \"click at {0} {1}\".format(event.x,event.y)\n# sq = (event.y // sq_size) * 8 + event.x // sq_size\n sq = self.coord_to_sq((event.x, event.y))\n if sq in self.piece_objs:\n drag_sq = sq\n self.canvas.tag_raise(self.piece_objs[sq])\n return", "def find_paddle(grid):\n for x in range(X_COLS):\n if grid[x][CURSOR_ROW] == 3:\n paddle_x = x\n\n return paddle_x", "def pointInBox(x, y, box):\n e = boxToExtent(box)\n return x >= e[0] and x <= e[2] and y >= e[1] and y <= e[3]", "def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1", "def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col", "def mouse_motion_current_mouse_position() -> EventType:\n x, y = pygame.mouse.get_pos()\n return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})", "def locate(x, y):\n position(x * 6, y)", "def mousePressed(index):\n return (pygame.mouse.get_pressed()[index])", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def current_position(self, solved_row, solved_col):\r\n solved_value = 
(solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\r\n solved_value = (solved_col + self._width * solved_row)\r\n\r\n for row in range(self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] == solved_value:\r\n return (row, col)\r\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def mouseResponse(event, x, y, flags, param):\n\n\n global uppLeft, lowLeft, uppRight\n\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.circle(workImg, (x, y), 5, (255, 0, 255), -1)\n if uppLeft is None:\n uppLeft = [x, y]\n print(\"uppLeft is \" + str(uppLeft))\n\n elif lowLeft is None:\n lowLeft = [x, y]\n print(\"lowLeft is \" + str(lowLeft))\n\n elif uppRight is None:\n uppRight = [x, y]\n print(\"uppRight is \" + str(uppRight))", "def get_pos(self, off_w=0, off_l=0, off_h=0):\n try:\n return self.world_grid[self.w + off_w][self.l + off_l][self.h + off_h]\n except IndexError:\n return blocks['wall']", "def check_clicked(self, events):\n x = self.x\n y = self.y\n xsize = self.xsize\n ysize = self.ysize\n (a, b) = pygame.mouse.get_pos()\n if a>x and b>y and a<x+xsize and b<y+ysize:\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.clickedAction(events)\n self.clicked = True\n return self.clicked", "def testGetClickBbox(self):\r\n # pixel coords for fake click\r\n self.prepareTestCanvas()\r\n myPoint = QgsPoint(50, 15)\r\n myBox = self.bucketFill.getClickBbox(myPoint)\r\n myExpectedBox = QgsRectangle(49.99850465,\r\n 14.99850465,\r\n 50.00149535,\r\n 15.00149535)\r\n myMessage = ('Bounding box was incorrect. 
Received values %s'\r\n ' Expected values %s' % (\r\n str('%s, %s, %s, %s' % (\r\n myBox.xMinimum(), myBox.yMinimum(),\r\n myBox.xMaximum(), myBox.yMaximum()\r\n )),\r\n str('%s, %s, %s, %s' % (\r\n myExpectedBox.xMinimum(), myExpectedBox.yMinimum(),\r\n myExpectedBox.xMaximum(), myExpectedBox.yMaximum()\r\n ))\r\n ))\r\n assert (round(myBox.xMinimum(), 9) ==\r\n round(myExpectedBox.xMinimum(), 9) and\r\n round(myBox.xMaximum(), 9) ==\r\n round(myExpectedBox.xMaximum(), 9) and\r\n round(myBox.yMinimum(), 9) ==\r\n round(myExpectedBox.yMinimum(), 9) and\r\n round(myBox.yMaximum(), 9) ==\r\n round(myExpectedBox.yMaximum(), 9)), myMessage", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def mouseResponse(event, x, y, flags, param):\n global uppLeft, lowLeft, uppRight\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.circle(workImg, (x, y), 5, (0, 255, 255), -1)\n if uppLeft is None:\n uppLeft = [x, y]\n print(\"x = \", x, \"y = \", y)\n elif lowLeft is None:\n lowLeft = [x, y]\n print(\"x = \", x, \"y = \", y)\n elif uppRight is None:\n uppRight = [x, y]\n print(\"x = \", x, \"y = \", y)", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def GetMove(self, board):\n move = None\n while True:\n move = input(\"Enter coordinates as XY (e.g. 21): \")\n if board[Game.GetIndexFromCoords(*move)] == \" \":\n return Game.GetIndexFromCoords(*move)\n else:\n print(\"Space occupied.\")", "def get_move_type(clicked_tile_position, blank_position):\n move_type = None # will hold move type\n\n clicked_row = clicked_tile_position[0] # get clicked row number\n clicked_col = clicked_tile_position[1] # get clicked column number\n\n blank_row = blank_position[0] # get blank row number\n blank_col = blank_position[1] # get blank column number\n\n # check UP or DOWN\n if clicked_row > blank_row and clicked_col == blank_col: # DOWN move\n move_type = 'down'\n elif clicked_row < blank_row and clicked_col == blank_col: # UP move\n move_type = 'up'\n \n # check LEFT or RIGHT\n if clicked_col > blank_col and clicked_row == blank_row: # RIGHT move\n move_type = 'right'\n elif clicked_col < blank_col and clicked_row == blank_row: # LEFT move\n move_type = 'left'\n \n return move_type", "def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def current_position(self, 
solved_row, solved_col):\n solved_value = (solved_col + self._width * solved_row)\n\n for row in range(self._height):\n for col in range(self._width):\n if self._grid[row][col] == solved_value:\n return (row, col)\n assert False, \"Value \" + str(solved_value) + \" not found\"", "def get_points_from_box(box):\n\t# Center of the box x = (x1+x2)/2 et y = (y1+y2)/2\n\tcenter_x = int(((box[1]+box[3])/2))\n\tcenter_y = int(((box[0]+box[2])/2))\n\n\treturn (center_x,center_y)", "def topleft(self):\n return (self.left, self.top)", "def select_maps(x, y, w, h, _selected_map, current_map):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n\r\n # Checking where the cursor is\r\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\r\n if click[0] == 1:\r\n if _selected_map >= 0:\r\n return _selected_map\r\n return current_map", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def _get_loc_top_left_placement_b(button_number):\r\n if button_number == 0:\r\n return 35 / 2, 7 / 2 # placement button that starts the game\r\n\r\n elif button_number == 1:\r\n return 35 / 2, 13 / 2 # placement button that brings program back to main menu\r", "def _get_selection_screencoords(self):\n selection = self.source.index.metadata[\"selections\"]\n if (selection is not None) and (len(selection) == 2):\n mapper = self.source.index_mapper\n return mapper.map_screen(np.array(selection))\n else:\n return None", "def checkPlayerBoxes(self, player, event):\n playerCoordinates = [player.positionRect.x, player.positionRect.y]\n element = self._get_grille()[playerCoordinates[1]][playerCoordinates[0]].element\n\n #If the player goes left\n if event.key == K_LEFT:\n playerCoordinates[0] -= 1\n if playerCoordinates[0] >= 0:\n element = self._get_grille()[playerCoordinates[1]][playerCoordinates[0]].element\n\n #If the player goes right\n elif event.key == K_RIGHT:\n playerCoordinates[0] += 1\n if playerCoordinates[0] < len(self._get_grille()[0]):\n element = self._get_grille()[playerCoordinates[1]][playerCoordinates[0]].element\n\n #If the player goes up\n elif event.key == K_UP:\n playerCoordinates[1] -= 1\n if playerCoordinates[1] >= 0:\n element = self._get_grille()[playerCoordinates[1]][playerCoordinates[0]].element\n\n #If the player goes down\n elif event.key == K_DOWN:\n playerCoordinates[1] += 1\n if playerCoordinates[1] < len(self._get_grille()):\n element = self._get_grille()[playerCoordinates[1]][playerCoordinates[0]].element \n\n else:\n pass\n \n if element is not None and element.symbol == \"B\":\n print(element.name)\n return element\n\n else:\n return None", "def getMouse(self):\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n #self.update()\n _tkCall(self.update)\n if self.isClosed(): raise GraphicsError, \"getMouse in closed window\"\n time.sleep(.1) # give up thread\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)", "def get_widget_at(self, x: int, y: int) -> tuple[Widget, Region]:\n for widget, cropped_region, region in self:\n if widget.is_visual and cropped_region.contains(x, y):\n return widget, region\n raise NoWidget(f\"No widget under screen coordinate ({x}, {y})\")", "def _get_bounding_box(self, frame, bounding_offset):\n\n # Try to find board if the boundingbox is not set\n center, ellipse, mask = self.board.detect(frame)\n\n # Should not be None\n if center is 
None:\n print(\"skipping frame\")\n return None\n if ellipse is None:\n print(\"skipping frame\")\n return None\n if mask is None:\n print(\"skipping frame\")\n return None\n\n self.point_mask = mask\n # cv2.imshow(\"mask\", mask)\n\n x_offset = (ellipse[1][0] / 2)\n x_center = ellipse[0][0]\n\n y_offset = ellipse[1][1] / 2\n y_center = ellipse[0][1]\n\n minx = max(0, x_center - x_offset - bounding_offset)\n maxx = min(self.width, x_center + x_offset + bounding_offset)\n miny = max(0, y_center - y_offset - bounding_offset)\n maxy = min(self.height, y_center + y_offset + bounding_offset)\n return ((int(minx), int(miny)), (int(maxx), int(maxy)))", "def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col", "def getSelectedPosition(*args):", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def get_pos(self):\n return (self.x, self.y)", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_points_from_box(box):\n # Center of the box x = (x1+x2)/2 et y = (y1+y2)/2\n center_x = int(((box[1]+box[3])/2))\n center_y = int(((box[0]+box[2])/2))\n center = center_x, center_y\n ground = center_x, box[2]\n\n return center, ground", "def get_mouse_position(self):\n raise NotImplementedError" ]
[ "0.69800913", "0.69175345", "0.6895727", "0.6853113", "0.6841856", "0.68141514", "0.671112", "0.6637405", "0.66198343", "0.65802467", "0.6531164", "0.64707327", "0.64676", "0.64114225", "0.63978434", "0.6394438", "0.6353023", "0.6344379", "0.6328014", "0.6309418", "0.6291824", "0.6283126", "0.6241309", "0.62193", "0.6218453", "0.62163824", "0.6184667", "0.6162169", "0.6162169", "0.6086252", "0.60608655", "0.6059931", "0.6048602", "0.6043203", "0.6019066", "0.60039914", "0.6003097", "0.5981533", "0.59761035", "0.59514666", "0.59493434", "0.593276", "0.59261864", "0.592581", "0.5894651", "0.58833873", "0.5870416", "0.5853817", "0.5839722", "0.5830265", "0.5825122", "0.5809592", "0.5800062", "0.579233", "0.5780613", "0.5780273", "0.5774944", "0.5762907", "0.5760259", "0.57601017", "0.57548285", "0.57478607", "0.5742573", "0.5739384", "0.57124716", "0.5711738", "0.5711738", "0.5709023", "0.5709023", "0.5709023", "0.57021725", "0.56958085", "0.5695052", "0.5669848", "0.56688714", "0.5666178", "0.56657165", "0.566401", "0.56575567", "0.5654935", "0.5654935", "0.5654935", "0.5654935", "0.56436014", "0.56390077", "0.5635628", "0.56322795", "0.5622555", "0.5615645", "0.5615089", "0.56143236", "0.5612184", "0.56085926", "0.5605356", "0.55940455", "0.55904126", "0.55855185", "0.55832314", "0.55825794", "0.55773914" ]
0.58239156
51
This function makes the move on the state_board
def make(self,state_board):
    state_board[self.column][self.line] = self.couleur #place the piece
    drawPiece((self.column,self.line),self.couleur) #draws it on the board
    for pos in self.flips: #flips all the pieces in flips
        state_board[pos[0]][pos[1]] = self.couleur
        drawPiece(pos,self.couleur) #draws it on the board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, state):\n raise NotImplementedError(\"Need to implement this method\")", "def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def move(self, board):\n raise NotImplementedError", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()", "def request_move(self, board):\n pass", "def move(self, state, move_cmd, i, j):\r\n new_state = self.clone_state(state)\r\n coordinate_change = self.action_dic[self.reflection_dic[move_cmd]]\r\n new_state[i][j], new_state[i + coordinate_change[0]][j + coordinate_change[1]] = \\\r\n new_state[i + coordinate_change[0]][j + coordinate_change[1]]\\\r\n , new_state[i][j]\r\n return new_state", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def move(self):\n pass", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == 
self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n 
old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def move(self, row, col, player):", "def execute_move(self, game_state):\n game_state.pacs_pos[self.pac_id] = self.next_move", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def move(self, action):\n \n currentState = self.state\n\n if action == \"up\":\n newState = (self.state[0] - 1, self.state[1])\n elif action == \"down\":\n newState = (self.state[0] + 1, self.state[1])\n elif action == \"right\":\n newState = (self.state[0], self.state[1] + 1)\n elif action == \"left\":\n newState = (self.state[0], self.state[1] - 1)\n else:\n raise NameError(action, 'This is not a valid action!')\n\n # Need to check if the new state is a legal move\n if (newState[0] >= 0) & (newState[0] <= 1) & (newState[1] >= 0) & (newState[1] <= 2):\n return newState\n else:\n print('This move takes you off the board, you have not moved!')\n return currentState", 
"def move():\n Robot.move()", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out", "def make_move(self, px, py):\n if self.state != State.IN_PROGRESS:\n raise GameEndedError('Cannot make move. The game has ended.')\n x, y, i, j = self.to_coords(px, py)\n board = self.boards[x][y]\n if (x, y) not in self.available_boards:\n raise IllegalMoveError('Illegal move. Board is unavailable.')\n board.set_square(i, j, self.__on_turn)\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n self.last_move = (x, y, i, j)\n self.history.append(self.last_move)", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def move(self, x, y):\n if self.computer_first:\n self.app.config(cursor='watch')\n self.board = self.board.move(x, y)\n self.update()\n self.computer_first = 0\n self.app.config(cursor='')\n else:\n self.board = self.board.move(x, y)\n self.update()\n self.app.config(cursor='watch')\n move = self.board.best()\n if move:\n self.board = self.board.move(*move)\n self.update()\n self.app.config(cursor='')", "def play_move(self,state):\n self.__engine.set_state(state)\n result = self.__engine.getNextState()\n time_elapsed = self.__engine.get_time_elapsed()\n num_nodes = self.__engine.get_num_explored()\n if self.moves == 0:\n self.average_time = time_elapsed\n self.average_nodes = num_nodes\n else:\n self.average_time = ( (self.average_time * self.moves) + time_elapsed ) / (self.moves+1)\n self.average_nodes = ( (self.average_nodes * self.moves) + num_nodes ) / (self.moves+1)\n self.moves += 1\n return result", "def move(self,p,intMove):\n gs = self.gameState.copy() #copy Board\n gs[p.pos.get()] = EMPTY #put position it was at as empty\n gs[self.movePos(p,intMove).get()] = p.color #set new position as filled\n return ((p,intMove),Board(gs,self.togglePlayer(self.whoseTurn)))", "def move(x,y):\r\n pass", "def advance(self, board):", "def play_move(self,state):\n raise AIError(\"Must be implemented for child class!\")", "def make_move(self, board: Block) -> int:\n raise NotImplementedError", "def move(self, board, move_dir):\n if 
move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def step(self, move):", "def move(self, action: Action) -> State:\n new_state = State(self.size_h, self.size_v, self.wall_squares, self.boxes, self.storage_locations,\n self.current_location, action)\n\n if action == Action.DOWN:\n down_loc = (new_state.current_location[0] + 1, new_state.current_location[1])\n two_away = (down_loc[0] + 1, down_loc[1])\n new_state.current_location = down_loc\n if down_loc in new_state.boxes:\n new_state.boxes.remove(down_loc)\n new_state.boxes.append(two_away)\n\n elif action == Action.UP:\n up_loc = (new_state.current_location[0] - 1, new_state.current_location[1])\n two_away = (up_loc[0] - 1, up_loc[1])\n new_state.current_location = up_loc\n if up_loc in new_state.boxes:\n new_state.boxes.remove(up_loc)\n new_state.boxes.append(two_away)\n\n elif action == Action.RIGHT:\n right_loc = (new_state.current_location[0], new_state.current_location[1] + 1)\n two_away = (right_loc[0], right_loc[1] + 1)\n new_state.current_location = right_loc\n if right_loc in new_state.boxes:\n new_state.boxes.remove(right_loc)\n 
new_state.boxes.append(two_away)\n\n elif action == Action.LEFT:\n left_loc = (new_state.current_location[0], new_state.current_location[1] - 1)\n two_away = (left_loc[0], left_loc[1] - 1)\n new_state.current_location = left_loc\n if left_loc in new_state.boxes:\n new_state.boxes.remove(left_loc)\n new_state.boxes.append(two_away)\n\n new_state._validate() # TODO: Remove me for the final product.\n return new_state", "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def do_move(self, board):\n raise NotImplementedError(\"do_move method not implemented for Player: {}\".format(self))", "def move(self):\n \n self.position = self.wander()", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n 
move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def _move_forward(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tif(self.y<=798):\n\t\t\tself.y = self.y+1\n\t\t\tif Board.board[self.x][self.y]=='0':\n\t\t\t\tMario.score += 1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='P':\n\t\t\t\tMario.lives+=1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_1-up.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='A':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tMario.attack = 1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_powerup.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='@':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tMario.lives-=1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_mariodie.wav\"])\n\t\t\t\tif Mario.lives<=0:\n\t\t\t\t\tcall([\"aplay\",\"-q\",\"smb_gameover.wav\"])\n\t\t\t\t\treturn \"exit\"\n\t\t\t\tos.system('clear')\n\t\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left\",Mario.lives)\n\t\t\t\tMario.respawn(self.x,self.y)\n\t\t\t\ttime.sleep(2)\n\t\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\n\t\t\telif(Board.board[self.x][self.y]=='/'):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x-1][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='I':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tBoard.bonus_round()\n\n\t\t\telif Board.board[self.x][self.y]=='K':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tenemy.boss_round()\n\n\t\t\telif(Board.board[self.x][self.y] in obstacles):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y-1]='M'\n\n\t\t\telif((Board.board[self.x+1][self.y-1]=='/' or Board.board[self.x+1][self.y-1]=='T') and Board.board[self.x+1][self.y]==' '):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y+1]='M'\n\t\t\t\tMario.go_down(self)\n\t\t\telse:\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\tif( self.y-1 >= ((Board.prev_j+Board.prev_k)/2) 
):\n\t\t\tos.system('clear')\n\t\t\tBoard.prev_j += 1 \n\t\t\tBoard.prev_k += 1\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\telse:\n\t\t\tos.system('clear')\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def makeMove(self, move, player):", "def makeMove(self, board, move):\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\n\t\tthis_move = TicTacToeHelper.rotateMove(move, rotation)\n\n\t\tself.__state_history.append((this_state, this_move))", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, 0, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 0, False, {}\n else:\n return state, -100, True, {}", "def move(self):\n \n self.position = self.explore()", "def _ai_move(self):\n move = self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()", "def move(s, a, beta):\n # update velocity with probability 1-beta\n global V\n if np.random.choice(2, p=[beta, 1-beta]) == 1:\n if a in [0, 3, 6] and V > 0: V -= 1\n 
elif a in [2, 5, 8] and V < 3: V += 1\n # else:\n # print \"velocity not updated!\"\n\n r_border = range(6, 49, 7) # states on the right border\n l_border = range(0, 49, 7) # states on the left border\n t_border = range(7) # states on the top border\n\n units = range(V)\n # move RIGHT of V units:\n if a < len(ACTIONS) / 3:\n for i in units:\n WORLD[STATE2WORLD[s+i]] = '~' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s+i in r_border or s+i+1 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s+V]] = 'O'\n return s+V, STEP\n\n # move UP of V units:\n elif a < 2*len(ACTIONS) / 3:\n for i in units:\n WORLD[STATE2WORLD[s-i*7]] = '|' # draw my path gradualy in the world\n # crash: reset world and velocities, return to start state\n if s-i*7 in t_border or s-(i+1)*7 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s-V*7]] = 'O'\n return s-V*7, STEP\n\n # move LEFT of V units:\n elif a < len(ACTIONS):\n for i in units:\n WORLD[STATE2WORLD[s-i]] = '~' # draw my path gradualy in the world\n # goal: draw where I end up & return\n if s-i-1 in GOALS:\n WORLD[STATE2WORLD[s-i-1]] = 'O'\n return s-i-1, WIN\n # crash: reset world and velocities, return to start state\n elif s-i in l_border or s-i-1 in WALLS:\n reset()\n return START, CRASH\n # nothing special: draw where I end up & return\n WORLD[STATE2WORLD[s-V]] = 'O'\n return s-V, STEP\n\n return s, STEP # should never happen", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def move(self, action):\n \n self.counter += 1\n\n if action not in self.ACTIONS:\n raise Exception(\"Invalid action\")\n\n \n\n d_x, d_y = self.MOVEMENTS[action]\n x, y = self.position\n new_x, new_y = x + d_x, y + d_y\n new_X,new_Y=self.position_to_xy(new_x, new_y)\n \n\n if (new_x, new_y) not in self.cases:\n return self._get_state(), -3, False, self.ACTIONS\n \n \n \n elif (self.openGoal(new_x,new_y))&(new_X>-400):\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n \n return self._get_state(), 20, True, self.ACTIONS\n \n # elif not self.openGoal(new_x,new_y):\n # self.position = new_x, new_y\n # self.positionxy = self.position_to_xy(new_x, new_y)\n # return self._get_state(), -1, False, self.ACTIONS\n \n elif self.counter > 100:\n self.position = new_x, new_y\n self.positionxy = 
self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, True, self.ACTIONS\n \n else:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, False, self.ACTIONS", "def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = (self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, -1, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 5, False, {}\n else:\n return state, -100, True, {}", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def change_move_state(self, new_state):\n\n if new_state != self.move_state:\n print(\"Changing move state from \", states[self.move_state],\n \" to \", states[new_state])\n self.move_state = new_state\n print(\"move_state is now\", self.move_state)", "def update(self, opponent_action, player_action):\r\n #format\r\n #throw, type, location\r\n #swing/slide, prev, next\r\n self.turn_no += 1\r\n\r\n\r\n #format the moves by adding the token type\r\n opponent_action = self.add_token_type_to_index_one(opponent_action)\r\n player_action =self.add_token_type_to_index_one(player_action)\r\n\r\n \r\n \r\n #if were moving towards the same coord and it exists on the board for now \r\n if (opponent_action[-1] == player_action[-1]):\r\n \r\n #check if theres a three way tie \r\n if opponent_action[-1] in self.board_dict:\r\n #list them down \r\n composition = [self.board_dict[opponent_action[-1]], opponent_action[1], player_action[1]]\r\n \r\n #if theres three unique tokens there \r\n if len(composition) == len(set(composition)):\r\n self.three_way_checking(opponent_action, player_action)\r\n else:\r\n self.two_token_situation(opponent_action, player_action)\r\n \r\n #otherwise business as usual\r\n else:\r\n self.two_token_situation(opponent_action, player_action)\r\n\r\n\r\n #otherwise if its swapping positions \r\n elif opponent_action[2] == player_action[-1] and opponent_action[-1] == player_action[2]:\r\n\r\n #set for both \r\n 
add_token(self.board_dict, self.player_tokens, opponent_action[-1], opponent_action[1], 'them')\r\n self.player_tokens[opponent_action[-1]] = 'them'\r\n \r\n add_token(self.board_dict, self.player_tokens, player_action[-1], player_action[1], 'us')\r\n self.player_tokens[player_action[-1]] = \"us\"\r\n\r\n #redo this entire segment\r\n #CB PAIN\r\n else:\r\n self.update_board_with_moves(player_action, \"us\")\r\n #move them\r\n self.update_board_with_moves(opponent_action, \"them\")\r\n #them move for us\r", "def make_move(self, move: int) -> None:\n if move not in self._valid_moves:\n raise ValueError(f'Move \"{move}\" is not valid')\n\n self._update_board(move)\n\n self._win_state = self._check_winner()\n self._is_red_active = not self._is_red_active\n self.move_number += 1", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def select_move(self, game_state):\n raise NotImplementedError()", "def computerTurn(board):\n\n i, j = bestMove(board)\n\n board[i][j] = computer\n pygame.time.delay(500)\n updateWindow(i, j, computer)", "def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy", "def advance_board(self):\n raise NotImplementedError", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] 
!= self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def animate(self):\n if self.board.automaton.get() == \"life\":\n self.state = self.life.game_of_life(self.state)\n #self.life.random_activations(self.state)\n elif self.board.automaton.get() == \"seeds\":\n self.state = self.life.seeds(self.state)\n else:\n pass\n\n self.board.update_cells(self.state)\n self.parent.after(DELAY, self.animate)", "def make_move(self, index):\n if self.board[index] is None and self.get_winner() is None:\n self.board[index] = self.player\n self.player = 'O' if self.player == 'X' else 'X'\n self.winner = self.get_winner()", "def apply_move(self, start_move, move):\n\t\t# check that the start move and the move are Move objects\n\t\tif not isinstance(move, Move):\n\t\t\tmove = Move(move)\n\t\tif not isinstance(start_move, Move):\n\t\t\tstart_move = Move(start_move)\n\t\t# copy the board\n\t\tnext_board = copy.deepcopy(self.board)\n\t\t# place the move on the next board\n\t\tnext_board.place(self.next_player, start_move.point, move.point)\n\t\treturn GameState(next_board, self.next_player.other, move)", "def update_state_game_variables(self):\n self.model.numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n self.model.player_mark = \"\"\n self.model.player_move = 0\n self.model.boards = [\"board\"] * 9\n self.update_score_board()", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def 
make_move(self, current_state):\n\n\t\tbatch_size = 192\n\n\t\ttest_board = np.zeros((batch_size, 3, 3, 3))\n\t\ttest_cows = np.zeros((batch_size, 2))\n\t\ttest_labels = np.zeros((batch_size, 1)) \n\n\t\tnew_states = current_state.expand_states()\n\n\t\tif len(new_states) == 0:\n\t\t\treturn None\n\n\t\tfor i, state in enumerate(new_states):\n\n\t\t\tdesc = self.state_descriptor(state, self.player_index)\n\t\t\ttest_board[i] = np.asarray(desc[0])\n\t\t\ttest_cows[i] = np.asarray([desc[1], desc[2]])\n\n\t\treturn new_states[self.train_value_function_approximation(\n\t\t\tTrue, (test_board, test_cows, test_labels), len(new_states))]", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def play_human_move(self):\n success, info = self.gms.play_human_move(raw_input('Make your next move\\n'.format('')))\n if success:\n print(self.gms.game.get_board_state_pretty())\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n self.play_human_move()\n elif info['status_code'] in [\n core_constants.GAME_STATUS_OVER_DRAW,\n core_constants.GAME_STATUS_OVER_HUMAN_WINNER,\n core_constants.GAME_STATUS_OVER_COMP_WINNER,\n ]:\n print(self.gms.status_code_message_map[info['status_code']])\n else:\n if info['error_code'] == core_constants.ERROR_CODE_INVALID_MOVE:\n self.play_human_move()", "def move(self, x, y):\n # Check if coordinates are occupied\n if self[x, y] is not self.EMPTY:\n raise BoardError('Cannot move on top of another piece!')\n\n # Store history and make move\n self._push_history()\n self[x, y] = self._turn\n\n # Check if any pieces have been taken\n taken = self._take_pieces(x, y)\n\n # Check if move is suicidal. A suicidal move is a move that takes no\n # pieces and is played on a coordinate which has no liberties.\n if taken == 0:\n self._check_for_suicide(x, y)\n\n # Check if move is redundant. 
A redundant move is one that would\n # return the board to the state at the time of a player's last move.\n self._check_for_ko()\n\n self._flip_turn()\n self._redo = []", "def move(self, move):\n raise NotImplementedError()", "def move(self):\n\n choices = []\n if self.game.green_apples > 0:\n choices.append(self.game.move_green)\n if self.game.red_apples > 0:\n choices.append(self.game.move_red)\n if self.game.blue_plums > 0:\n choices.append(self.game.move_blue)\n if self.game.yellow_pears > 0:\n choices.append(self.game.move_yellow)\n\n random_index = random.randint(0, len(choices) - 1)\n f = choices[random_index]\n f(True)", "def make_step(self, current_state, action):\n\n if current_state == 12:\n current_state = current_state + 50\n elif current_state == 23:\n current_state = current_state + 25\n\n new_state = current_state\n\n if 0 <= current_state < 25:\n dimension = 1\n elif 25 <= current_state < 50:\n dimension = 2\n elif 50 <= current_state < 75:\n dimension = 3\n else:\n print(\"Error in dimension\")\n\n # Update new_position based on the chosen action and check whether agent hits a wall.\n if action == \"n\":\n temp_state = current_state + self.num_cols\n if temp_state < self.num_cells * dimension:\n new_state = temp_state\n elif action == \"e\":\n temp_state = current_state + 1\n if temp_state % self.num_cols > 0:\n new_state = temp_state\n elif action == \"s\":\n temp_state = current_state - self.num_cols\n if temp_state >= 0 + (25 * (dimension - 1)):\n new_state = temp_state\n elif action == \"w\":\n temp_state = current_state - 1\n if temp_state % self.num_cols < self.num_cols - 1:\n new_state = temp_state\n else:\n raise ValueError('Action was mis-specified!')\n\n # Get reward\n reward = self.rewards[new_state]\n\n # Deduct 1 from reward for every attempted move\n reward -= 1\n\n return (new_state, reward)", "def undo_move(self):\n # general idea:\n # store the state of the board in a stack before every successful attempted move \n # when this is called, set the current board equal to the top state in the stack\n # print(\"Undo\")\n # print(self)\n # if len(self.board_states) != 0:\n if self.moves != 0:\n self.moves -= 1\n self.stock = []\n self.wp = []\n self.foundations = []\n self.tableaus = []\n self.stock, self.wp, self.foundations, self.tableaus = self.board_states.pop()\n self.init_move_dict()", "def make_move(self, row:int, col:int,curr_move):\n self.array[row][col] = curr_move", "def make_move(self):\r\n if self.running and self.run:\r\n if self.board is None:\r\n SlTrace.lg(\"sp.board is None\")\r\n return False\r\n \r\n SlTrace.lg(\"running_loop self.running and self.run\", \"running_loop\")\r\n SlTrace.lg(\"running_loop self.start_move\", \"running_loop\")\r\n if self.start_move():\r\n SlTrace.lg(\"running_loop successful start_move\", \"running_loop\")\r\n self.next_move_no()\r\n SlTrace.lg(\"running_loop after start_move\", \"running_loop\")\r\n if self.to_pause:\r\n self.pause_cmd()\r\n self.to_pause = False\r\n return True", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif 
action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n board.empty_cell(action['action']['at'])\n reward += 1\n else:\n if action['action_type'] == YoteActionType.ADD:\n state.boring_moves += 1\n state.in_hand[player] -= 1\n board.fill_cell(action['action']['to'], Color(player))\n elif action['action_type'] == YoteActionType.MOVE:\n at = action['action']['at']\n to = action['action']['to']\n\n def distance(cell_1, cell_2):\n import math\n return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n board.empty_cell(at)\n board.fill_cell(to, Color(player))\n if int(distance(at, to)) == 1:\n state.boring_moves += 1\n elif int(distance(at, to)) > 1:\n state.boring_moves = 0\n next_is_reward = True\n board.fill_cell(to, Color(player))\n if at[0] == to[0] and at[1] < to[1]:\n board.empty_cell((at[0], at[1] + 1))\n captured = (at[0], at[1] + 1)\n elif at[0] == to[0] and at[1] > to[1]:\n board.empty_cell((at[0], at[1] - 1))\n captured = (at[0], at[1] - 1)\n elif at[1] == to[1] and at[0] < to[0]:\n board.empty_cell((at[0] + 1, at[1]))\n captured = (at[0] + 1, at[1])\n elif at[1] == to[1] and at[0] > to[0]:\n board.empty_cell((at[0] - 1, at[1]))\n captured = (at[0] - 1, at[1])\n reward += 1\n\n state.set_board(board)\n state.score[player] += reward\n state.captured = captured\n state.rewarding_move = next_is_reward\n state.previous_is_reward = previous_is_reward\n state.set_latest_player(player)\n state.set_latest_move(json_action)\n if next_is_reward:\n state.set_next_player(player)\n else:\n state.set_next_player(player * -1)\n\n done = YoteRules.is_end_game(state)\n return state, done, next_is_reward", "def execute_move(self, move, color):\n\n (x, y) = move\n\n # Add the piece to the empty square.\n assert self[x][y] == 0\n self[x][y] = color", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def execute_move(self, move, color):\n\n #Much like move generation, start at the new piece's square and\n #follow it on all 8 directions to look for a piece allowing flipping.\n\n # Add the piece to the empty square.\n # print(move)\n flips = [flip for direction in self.__directions\n for flip in self._get_flips(move, direction, color)]\n assert len(list(flips))>0\n for x, y in flips:\n #print(self[x][y],color)\n self[x][y] = color", "def move_player(direction):\n global ZERO_BASE_PLYR_POS\n if direction == \"north\":\n ZERO_BASE_PLYR_POS -= 10\n elif direction == \"south\":\n ZERO_BASE_PLYR_POS += 10\n elif direction == \"west\":\n ZERO_BASE_PLYR_POS -= 1\n elif direction == \"east\":\n ZERO_BASE_PLYR_POS += 1\n \n sleep(0.5) # all moves have a 0.5 second delay\n \n show_ground_feature()", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if 
self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def state_cb(self, msg):\n self.prev_state = deepcopy(self.current_state)\n self.current_state = msg\n\n if self.current_state.mode == \"MANUAL\":\n if self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream DISABLED\")\n self.stop_streaming_offboard_points()\n\n if self.current_state.mode == \"POSCTL\":\n if not self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream ENABLED\")\n self.start_streaming_offboard_points()\n if not self.prev_state.mode == \"POSCTL\":\n # just switched into POSCTL, call hover\n self.hover()\n\n if self.current_state.mode == \"OFFBOARD\":\n if not self.prev_state.mode == \"OFFBOARD\":\n # just switched to OFFBOARD, call move\n rospy.loginfo(\"Entering OFFBOARD Mode\")\n for i in range(0,len(velocities)):\n maneuver_velocity_setpoint=velocities[i]\n maneuver_reference_frame = maneuver_reference_Frame\n maneuver_duration=duration[i]\n self.execute_maneuver( self.maneuver_velocity_setpoint, \n self.maneuver_reference_frame, \n self.maneuver_duration)", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def go_up(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tMario._pass(self.x,self.y)\n\t\ttemp = self.x\n\t\tflag = 0\n\t\twhile(temp>=self.x-8):\n\t\t\tif(Board.board[temp][self.y] in obstacles):\n\t\t\t\tflag = 1\n\t\t\t\ttemp_x = temp+1\n\t\t\t\tbreak\n\t\t\ttemp = temp-1\n\n\t\tif(not flag):\n\t\t\ttemp_x = self.x-8\n\n\t\tif Board.board[temp_x-1][self.y]=='B':\n\t\t\tnew = self.y\n\t\t\tfor i in range(new-4,new+5):\n\t\t\t\tif Board.board[temp_x-1][i]=='B':\n\t\t\t\t\tBoard.board[temp_x-1][i]='T'\n\t\t\tMario.bonus+=50\n\t\t\tif self.y==229 or self.y ==230 or self.y==231:\n\t\t\t\tBoard.board[23][230]='P'\n\n\n\t\tBoard.board[temp_x][self.y] = 
'M'\t\t\n\t\tos.system('clear')\n\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def __play_move(self, x, y):\n\t\tself.board[y][x] = self.current_player\n\t\tself.current_player = self.__negated_player(self.current_player)", "def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()", "def move(self, action):\n ligne = self.location_[0] + self.actions_[action][0]\n column = self.location_[1] + self.actions_[action][1]\n newLocation = (ligne, column)\n self.location_ = newLocation\n newState = (self.location_[0] * self.width ) + self.location_[1]\n\n if self.location_[0] == 0 and self.location_[0] == 0:\n return 0\n\n return newState", "def make_move(self, board: Block):\n # select a random block and highlight it.\n rand_block = select_random_block(board)\n rand_block.highlighted = True\n self.renderer.draw(board, self.id)\n pygame.time.wait(TIME_DELAY)\n choice = random.randint(0, 4)\n\n if rand_block.level == rand_block.max_depth or rand_block.level == 0:\n # Random player has chosen to smash an invalid block thus its move\n # is forfeited\n if choice == 4:\n pass\n else:\n perform_move(rand_block, choice)\n else:\n perform_move(rand_block, choice)\n rand_block.highlighted = False\n self.renderer.draw(board, self.id)\n return 0", "def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos", "def play_move(self,state):\n #Keep asking for the next move until a valid move.\n while(True):\n childList = state.get_successors()\n print(\"Your possible moves:\")\n i = 0\n for c in childList:\n if i > 0 and i%4 == 0:\n print()\n print(c.get_action().ljust(10),end=\"\\t\");\n i += 1\n print()\n nextMove = input(\"What is your next move? \\ne.g.'F2-E3' or 'Quit'\\n\")\n #Check if the move is valid\n if nextMove.lower() == 'Quit'.lower():\n return None\n for c in childList:\n if c.get_action().upper() == nextMove.upper():\n return c\n # Move not possible \n print(\"Invalid move!! 
Please try again...\\n\")", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def move(self, movement):\n index = self.state.index(0)\n\n new_state = self.state.copy()\n\n if movement == 'up':\n new_state[index], new_state[index - 3] = new_state[index - 3], new_state[index]\n elif movement == 'down':\n new_state[index], new_state[index + 3] = new_state[index + 3], new_state[index]\n elif movement == 'left':\n new_state[index], new_state[index - 1] = new_state[index - 1], new_state[index]\n else:\n # movement == 'right'\n new_state[index], new_state[index + 1] = new_state[index + 1], new_state[index]\n \n return new_state", "def decide_move(self, action):\n x1, y1 = action['xy1']\n x2, y2 = action['xy2']\n self.__state.push(action)", "def move(self) -> None:\n\n if self.move_up:\n self.__moveUpIfPossible()\n if self.move_down:\n self.__moveDownIfPossible()", "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)", "def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O" ]
[ "0.7646966", "0.75079894", "0.74252313", "0.72777075", "0.7197312", "0.7128416", "0.71194434", "0.70886326", "0.7026608", "0.70223653", "0.7015145", "0.6997819", "0.69861656", "0.69759107", "0.69399935", "0.693564", "0.6930939", "0.6925819", "0.6908902", "0.68981934", "0.6890927", "0.6886171", "0.6886171", "0.68857586", "0.68755907", "0.6856877", "0.6852084", "0.6851368", "0.68512744", "0.6826972", "0.6814997", "0.6801284", "0.6799901", "0.67841", "0.67676455", "0.67611724", "0.67443097", "0.6738912", "0.67256784", "0.6723076", "0.67195714", "0.6716892", "0.6709135", "0.66922015", "0.66721505", "0.66653895", "0.6643129", "0.6639475", "0.6630446", "0.6626167", "0.6621164", "0.6609937", "0.6601137", "0.6592837", "0.6587824", "0.65850294", "0.65743095", "0.6561921", "0.6556582", "0.65481764", "0.65474534", "0.65427035", "0.6539382", "0.6537191", "0.65351427", "0.6521122", "0.65151805", "0.65115505", "0.65103203", "0.6509952", "0.64930296", "0.64918876", "0.64886963", "0.6485113", "0.64826125", "0.64819944", "0.64764977", "0.6473455", "0.64729196", "0.6470955", "0.6468626", "0.6463499", "0.646129", "0.6459498", "0.64572585", "0.64548033", "0.6454597", "0.6454041", "0.64491713", "0.64471817", "0.64343053", "0.6430854", "0.64298886", "0.64290786", "0.6422462", "0.6420233", "0.64201945", "0.6420035", "0.64182097", "0.6418101" ]
0.6485864
73
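The score list above, the standalone score, and the value 73 read like per-negative similarity scores, a score for the positive document, and that document's rank among its negatives. This reading is an assumption, but it is consistent with the figures shown: counting how many of the listed negative scores exceed 0.6485864 gives exactly 73. A minimal Python sketch of that computation (variable names here are illustrative, not field names taken from the dataset):

# Assumed reading: rank the positive document by counting negatives that outscore it.
def rank_among_negatives(doc_score: float, neg_scores: list) -> int:
    # Number of negatives with a strictly higher score than the positive document.
    return sum(1 for s in neg_scores if s > doc_score)

# Illustrative check against the values shown above (list truncated here):
# neg_scores = [0.7646966, 0.75079894, ..., 0.6418101]
# rank_among_negatives(0.6485864, neg_scores)  # -> 73 for the full list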
This function defines the initial board of the game
def init_board(): # Generates a table 10*10 of 0s with -1 around and the initial state # of the board with 2 whites and 2 blacks in the middle table = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s #initial state is drawn and recorded table[4][4] = 2 table[5][5] = 2 table[4][5] = 1 table[5][4] = 1 drawPiece((4,4),2) drawPiece((5,5),2) drawPiece((4,5),1) drawPiece((5,4),1) return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
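The metadata above declares a triplet objective over the "query", "document", and "negatives" fields. As a rough, hedged illustration (the record layout and helper below are assumptions, not part of the dataset itself), one record could be expanded into training triplets for a contrastive objective like so:

# Hypothetical sketch: expand one record into (anchor, positive, negative) triplets,
# following the declared objective {"triplet": [["query", "document", "negatives"]]}.
def to_triplets(record: dict):
    # The anchor is the natural-language query, the positive is its paired code
    # snippet, and each entry in "negatives" supplies one contrasting example.
    query = record["query"]
    positive = record["document"]
    for negative in record["negatives"]:
        yield (query, positive, negative)

# A triplet or contrastive loss would then pull the query embedding toward the
# positive document and push it away from each negative.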
[ "def initBoard(self):\n pass", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def init_board():\n board = ['#', 1, 2, 3, 4, 5, 6, 7, 8, 9]\n return board", "def initial_board():\n board = [OUTER] * 100\n for i in Othello.squares():\n board[i] = EMPTY\n # The middle four squares should hold the initial piece positions.\n board[44], board[45] = BLACK, WHITE\n board[54], board[55] = WHITE, BLACK\n return board", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. 
Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def set_board(board):", "def initialize_board(self):\n self.board_values = {x:x for x in(range(1,10))}", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')", "def setup_board(self):\n\n for row in range(10):\n\n row_list = list()\n\n for column in range(9):\n\n row_list.append(None)\n\n self._board.append(row_list)", "def __init__ (self, cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def initial_state() -> Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def __init__(self):\n self.board = [[0 for i in range(9)]]*9\n self.board = [[0, 0, 0, 0, 3, 0, 9, 0, 0],\n [0, 0, 3, 0, 8, 0, 0, 0, 7],\n [6, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 5, 8, 3, 6, 0, 0, 0, 0],\n [0, 1, 0, 8, 9, 4, 0, 6, 0],\n [0, 0, 0, 0, 2, 7, 8, 4, 0],\n [0, 0, 9, 0, 0, 0, 0, 0, 8],\n [7, 0, 0, 0, 4, 0, 6, 0, 0],\n [0, 0, 5, 0, 1, 0, 0, 0, 0]]", "def __init__(self):\n\n # self.board = [[str(row)+\" \"]+[\" \" for col in range (10)] for row in range(1,11)]\n # self.board.append([\" \", '1 ', '2 ', '3 ', '4 ', '5 ', '6 ', '7 ', '8 ', '9 ', '10'])\n self.board = [[\" \" for col in range (10)] for row in range(10)]\n # self.board[9][0]=\"10\"", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def __init__(self):\n self.board = [[T.Tile().getColor() for x in range(6)] for y in range(6)]", "def __init__(self):\r\n\t\tself.game_board = [['0','0','0'],['0','0','0'],['0','0','0']]\r\n\t\tself.count = 0\r\n\t\tself.x_turn = True\r\n\t\r\n\r\n\t\tpass", "def __init__(self):\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]", "def board_init():\n board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]\n return board", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def init():\n for i in range(COLS):\n for j in range(ROWS):\n BOARD[i][j] = int(random(2))", "def __init__(self, initial_board):\n self.initial_board = initial_board", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def _initiate_board(self):\n grid = []\n for i in 
range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def initial_state():\n board = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n return board", "def __init__(self):\n self.board = [\n BS, BS, BS, BS,\n BS, BS, BS,\n BS, BS, BS, BS,\n EM, EM, EM,\n WS, WS, WS, WS,\n WS, WS, WS,\n WS, WS, WS, WS\n ]\n self.curr_player = WHITE_PLAYER", "def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def __init__(self):\n\n self._board = list()\n self._palace_board_blue = ['d9', 'e8', 'e10', 'f9']\n self._palace_board_red = ['d2', 'e1', 'e3', 'f2']\n self._palace_diagonal_blue = ['d8', 'd10', 'e9', 'f8', 'f10']\n self._palace_diagonal_red = ['d1', 'd3', 'e2', 'f1', 'f3']\n self._board_columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n self._board_rows = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']\n self._general_position_blue = 'e9'\n self._general_position_red = 'e2'\n\n self.setup_board()", "def init_board(self):\n\n self.__board = dict()\n order = ['rook', 'knight', 'bishop', 'queen', 'king', 'bishop',\n 'knight', 'rook']\n for j, name in enumerate(order):\n\n self.__board[(0, j)] = ChessGame.Piece( name, ChessGame.WHITE)\n self.__board[(7, j)] = ChessGame.Piece( name, ChessGame.BLACK)\n 
self.__board[(1, j)] = ChessGame.Piece('pawn', ChessGame.WHITE)\n self.__board[(6, j)] = ChessGame.Piece('pawn', ChessGame.BLACK)\n\n self.__players = { ChessGame.WHITE: set(), ChessGame.BLACK: set() }\n for color in (ChessGame.BLACK, ChessGame.WHITE):\n self.__players[color] = {(x, y) for (x, y), piece in\n self.__board.iteritems() if piece.color == color }\n\n return", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def __init__(self, rows=6, columns=7, win_length=4):\n\n self._board = [[0 for i in xrange(columns)] for i in xrange(rows)]\n self._rows = rows\n self._columns = columns\n self._win_length = win_length\n self.current_player = None\n self.winner = None\n print \"The game is afoot!\"", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self):\n\t\tself.current = Piece.EX\n\t\tself.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]", "def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def set_pieces(self):\n\n for i in range(len(self._game_board)):\n\n # Row 1\n if i == 0:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"black\", \"BCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"black\", \" BH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"black\", \" BE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"black\", \" BA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"black\", \" BG \")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 3\n if i == 2:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Cannon(\"black\", \"BCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 4\n if i == 3:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"black\", \"BSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 7\n if i == 6:\n for ii in range(len(self._game_board[i])):\n if ii % 2 == 0:\n self._game_board[i][ii] = Soldier(\"red\", \"RSOL\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 8\n if i == 7:\n for ii in range(len(self._game_board[i])):\n if ii == 1 or ii 
== 7:\n self._game_board[i][ii] = Cannon(\"red\", \"RCAN\")\n self._game_board[i][ii].update_location([i, ii])\n\n # Row 10\n if i == 9:\n for ii in range(len(self._game_board[i])):\n if ii == 0 or ii == 8:\n self._game_board[i][ii] = Chariot(\"red\", \"RCHA\")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 1 or ii == 7:\n self._game_board[i][ii] = Horse(\"red\", \" RH \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 2 or ii == 6:\n self._game_board[i][ii] = Elephant(\"red\", \" RE \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 3 or ii == 5:\n self._game_board[i][ii] = Advisor(\"red\", \" RA \")\n self._game_board[i][ii].update_location([i, ii])\n if ii == 4:\n self._game_board[i][ii] = General(\"red\", \" RG \")\n self._game_board[i][ii].update_location([i, ii])", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def init_board(rows, columns, method=\"random\"):\n if method == \"random\":\n board = np.random.random_integers(2, size=(rows, columns)) - 1\n return board", "def init_new_board(self) -> None:\r\n\r\n TkState.enable(self.settings_menu.winfo_children())\r\n TkState.enable(self.edit_menu.winfo_children())\r\n TkState.enable([self.play_button, self.step_button])\r\n TkState.disable([self.reset_button])\r\n\r\n self.gen_number.config(text = 0)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n\r\n self.animator.board = self.anim_board\r\n self.painter.board = self.anim_board\r\n self.painter.adjust_to_canvas()", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def __init__(self, dim, reverse = False, board = None):\n self.empty_squares = []\n if board == None:\n self.board = [[\"\", \"\", \"\"],\n [\"\", \"\", \"\"],\n [\"\", \"\", \"\"]]\n self.dim = dim\n self.board = board\n self.reverse = reverse\n self.win = None\n self.DRAW = 4\n self.EMPTY = 1\n self.PLAYERO = 2\n self.PLAYERX = 3", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def initialize_board(self):\n seed = self.seed and self.seed.any()\n if not (self.shape or seed):\n raise Exception(\"Either a shape or a seed is required.\")\n\n elif self.shape and seed:\n # Center the seed on a game board\n board = self._center_seed(self.shape, self.seed)\n\n elif self.shape:\n # The probability a cell starts off dead\n prob_dead = [1 - self.game.weight]\n # Class probabilities for live cells\n probs_alive = [self.game.weight * (1/self.classes)] * self.classes\n\n board = np.random.choice(\n self.classes + 1,\n np.prod(self.shape),\n p = prob_dead + probs_alive\n ).reshape(self.shape)\n \n else: # Only a seed is given\n self.shape = self.seed.shape\n board = self.seed\n\n self.array = 
board\n self.start_array = board\n self.prev_array = None", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def setupBoard(self):\n\t\tfor x in range(8):\n\t\t\tfor y in range(8):\n\t\t\t\tif x % 2 == 0:\n\t\t\t\t\tif y % 2 == 0:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.BlackSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"b\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\t\telif y % 2 == 1:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.WhiteSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"w\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\tif x % 2 == 1:\n\t\t\t\t\tif y % 2 == 1:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.BlackSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"b\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)\n\t\t\t\t\telif y % 2 == 0:\n\t\t\t\t\t\tself.button[x][y] = tk.Button(\n\t\t\t\t\t\t\tself,\n\t\t\t\t\t\t\timage=self.WhiteSquareImage,\n\t\t\t\t\t\t\tcommand=lambda x=x, y=y: self.recordInput(x, y),\n\t\t\t\t\t\t)\n\t\t\t\t\t\t# self.board[x][y] = Pieces.Piece(\"w\")\n\t\t\t\t\t\tself.button[x][y].grid(row=x, column=y)", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def init_game(self):\n nrows = len(self.array)\n self.game_over = False\n self.squares_left = nrows * nrows\n self.bombs_left = 0\n # clear the board\n for i in xrange(nrows):\n for j in xrange(nrows):\n self.array[i][j].reset()\n # put N random bombs\n for i in xrange(nrows):\n rand_num = random.randrange(nrows*nrows)\n if self.array[rand_num 
/ nrows][rand_num % nrows].type \\\n != SquareType.BOMB:\n self.insert_bomb(rand_num / nrows, rand_num % nrows)\n self.squares_left -= self.bombs_left\n self.print_board()", "def __init__(self, board):\n self.board = board", "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()\n self.last_moved = None", "def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]", "def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), (18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def __init__(self, is_debug):\n super(Board, self).__init__(BOARD_SIZE, 6.5, is_debug=is_debug)\n self.outline = pygame.Rect(GRID_SIZE+5, GRID_SIZE+5, DRAW_BOARD_SIZE[0]-GRID_SIZE*2, DRAW_BOARD_SIZE[1]-GRID_SIZE*2)\n self.draw_board()", "def __init__(self):\n self._current_state = \"UNFINISHED\"\n self._start_color = \"RED\"\n self._board = Board()", "def __init__(self, initial_board, num_of_iterations=None, boundary=BoardBoundaries.HARD_BORDERS):\n self.initial_board = initial_board\n self.reset_board()\n self.num_iterations = num_of_iterations\n self.boundary = boundary", "def __init__(self, width, height, player, opponent):\r\n self.height = height\r\n self.width = width\r\n self.board = []\r\n for x in range(height):\r\n self.board.append([])\r\n for y in range(width):\r\n self.board[x].append(\"\\t\")\r\n self.player = player\r\n self.opponent = opponent\r\n self.lastMove = (0, 0)", "def __init__(self, board=None):\n self.winner = None\n self.board = board or [self.__class__.EMPTY_POSITION_COUNTER] * 9", "def create_initial_grid():\n\n\tgrid = {(x, y) : ' + ' for x in range(8) for y in range(8)}\n\n\t# Define initial positions \n\tgrid[(3,3)] = colors.RED + \"[I]\" + colors.STOP\n\tgrid[(4,3)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(3,4)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(4,4)] = colors.RED + \"[I]\" + colors.STOP\n\n\treturn grid", "def setUp(self):\n\n self.board = Board(3, 3)", "def boardInit(self, width: int, height: int, bombs: int):\n # New board\n self.width = width\n self.height = height\n self.bombs = bombs\n self.board = minesweepergame.game(width, height, bombs)\n\n # New board items\n 
self.canvas.delete(*self.canvas.find_all())\n self.canvassquares: List[List[Optional[int]]] = [[None for y in range(\n self.board.height)] for x in range(self.board.width)] # Store the `_CanvasItemId`s\n self.canvasicons: List[List[Optional[int]]] = [[None for y in range(\n self.board.height)] for x in range(self.board.width)] # Store the `_CanvasItemId`s\n self.victoryMessage: Optional[int] = None\n\n # Render\n self.render()", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def __init__(self, width, height):\n # IMPLEMENT ME\n self.width = width\n self.height = height\n self.board = []\n for i in range(height):\n row = []\n for j in range(width):\n row.append(\".\")\n self.board.append(row)", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def make_board():\n board = bingo_numbers()\n board[2][2] = ''\n return board", "def your_board():\n board = [\n [5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 7, 9]\n ]\n return board", "def __init__(self):\n self.rows = [18, 23, 24, 25]\n self.cols = [17, 27, 22]\n self.keypad = [\n [\"1\", \"2\", \"3\"],\n [\"4\", \"5\", \"6\"],\n [\"7\", \"8\", \"9\"],\n [\"*\", \"0\", \"#\"]\n ]\n self.setup()", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def resetBoard(self):\n\t\tself.board = np.zeros((self.boardSize,self.boardSize))", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def new_board() -> list:\n board = []\n for _ in range(BOARDHEIGHT):\n board.append([BLANK] * BOARDWIDTH)\n\n return board", "def initializer():\n size: int = int(input(\"Enter a number for the board size: \"))\n board: List[List[str]] = [[random.choice([\"X\", \"O\", \" \"]) for x in range(size)] for y in\n range(size)]\n return size, board", "def __init__(self) -> None:\n self.row = 6\n self.col = 7\n self.grid = []\n\n for y in range(self.row):\n temp_row = []\n for x in range(self.col):\n temp_row.append(\" \")\n self.grid.append(temp_row)", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create 
black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def __init__(self):\n\n self._board = Board()", "def __init__(self,side:int)->None:\n self._side=side\n self._chessboard=[[0 for col in range(self._side)] for row in range(self._side)]", "def _make_board(self, rows, cols, top_left):\n board = []\n for i in range(rows):\n board.append([])\n for j in range(cols):\n board[-1].append(\" \")\n\n top_left_row = math.floor(self._num_rows/2)\n top_left_col = math.floor(self._num_cols/2)\n if top_left == \"B\":\n board[top_left_row-1][top_left_col-1] = top_left\n board[top_left_row-1][top_left_col+1-1] = \"W\" \n board[top_left_row+1-1][top_left_col-1] = \"W\" \n board[top_left_row+1-1][top_left_col+1-1] = \"B\" \n elif top_left == \"W\":\n board[top_left_row-1][top_left_col-1] = top_left \n board[top_left_row-1][top_left_col+1-1] = \"B\" \n board[top_left_row+1-1][top_left_col-1] = \"B\" \n board[top_left_row+1-1][top_left_col+1-1] = \"W\"\n\n return board", "def generate_board(self):\n new_board = []\n for i in range(self.size):\n new_board.append([\"0\"] * self.size)\n return new_board", "def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)", "def reset(self):\r\n self.board = [[0 for i in range(self.width)]\r\n for i in range(self.height)]\r\n self.new_tile()\r\n self.new_tile()", "def __init__(self, board):\n self.running = True\n self.state = \"waiting\"\n pygame.init()\n pygame.display.set_caption(\"Sudoku Solver\")\n\n self.define_grid()\n self.define_number_positions()\n self.define_button()\n self.board = board\n self.font = pygame.font.Font('ubuntu.ttf', NUMBERS_SIZE)\n self.sleep_time = 1 / CHANGES_PER_SECOND\n\n self.original_board = board.copy()", "def __init__(self, n: int):\n self.n = n\n self.board = [[0 for _ in range(n)] for _ in range(n)]", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def get_board(self):\n pass", "def __init__(self, board, turn):\n self.player = turn\n self.roll = self.roll_dice()\n #array of applied board states\n self.moves = []\n self.board = board\n self.generate_valid_moves()", "def __init__(self, size):\n self.size = size\n self.num_queens_placed = 0\n self.board = self.generate_board()", "def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]", "def resetBoard(self):\n self.space1 = 0\n self.space2 = 0\n self.space3 = 0\n self.space4 = 0\n self.space5 = 0\n self.space6 = 0", "def test_initial_board():\n board = Board()\n\n for i in range(board.get_grid_size()[0]):\n for j in range(board.get_grid_size()[1]):\n assert board.get_token(i, j) == Board.EMPTY", "def __init__(self,board,mode):\n self.bluh=[]\n self.board=board\n self.mode=mode", "def setup(match):\r\n match.height = constants.HEIGHT\r\n match.width = 
constants.WIDTH\r\n match.board = makeBoard(match.height, match.width)\r\n match.seedlist = constants.SEEDLIST\r\n if match.seedlist == None:\r\n match.seedlist=randseed(match.height, match.width)\r\n numofrow = len(match.seedlist[0]) #opposite notation because column-major not row-major\r\n for x in match.seedlist:\r\n match.board[x[0]][x[1]] = constants.ALIVE\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # TO DO:\r\n # Initialize board according to match.seedlist\r", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def draw_board(self):\n board = \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[1], self.board_values[2], self.board_values[3])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[4], self.board_values[5], self.board_values[6])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[7], self.board_values[8], self.board_values[9])\n board += \"-------------------\\n\"\n return board" ]
[ "0.84164816", "0.81662166", "0.7968856", "0.77619267", "0.7733716", "0.77081436", "0.7704614", "0.7676403", "0.75649804", "0.75117373", "0.75034124", "0.7497264", "0.7476524", "0.7459026", "0.74174595", "0.7414584", "0.738848", "0.73879623", "0.73621136", "0.73618233", "0.7344797", "0.73386407", "0.73283553", "0.732771", "0.7281164", "0.7280197", "0.7277582", "0.7272522", "0.7237704", "0.7210596", "0.71639687", "0.713045", "0.71226645", "0.71114", "0.71093816", "0.71092373", "0.70971805", "0.7059108", "0.7046911", "0.703808", "0.7025861", "0.7021503", "0.70143265", "0.7012858", "0.69997936", "0.6996081", "0.6996081", "0.6963488", "0.69492596", "0.69491553", "0.69292146", "0.6900746", "0.68981385", "0.68874407", "0.6886768", "0.68847996", "0.6871527", "0.6870871", "0.68660593", "0.6829007", "0.6787637", "0.6785075", "0.67787474", "0.67767566", "0.6769817", "0.6769372", "0.67482835", "0.67170274", "0.67118114", "0.6707794", "0.6703215", "0.66911775", "0.6675568", "0.666057", "0.6655795", "0.6655795", "0.66341466", "0.66272974", "0.6611868", "0.6608206", "0.6608055", "0.660296", "0.6598973", "0.6595204", "0.6589306", "0.65872574", "0.65730166", "0.6571897", "0.6562395", "0.65599847", "0.65543956", "0.6550719", "0.654842", "0.65439343", "0.6531703", "0.6526099", "0.65255165", "0.6519965", "0.65187013", "0.65138465" ]
0.83401465
1
This function finds possibilities for a player on a board
def possible(state_board,turn):
    legal_moves = [] # list of legal moves as Move objects

    for i in range(1,9):
        for j in range(1,9):
            if state_board[i][j] == 0:
                if flipper([i,j],turn,state_board) != []:
                    # if there are flipped pieces, it appends this move to
                    # the legal moves and draws it in light greens
                    legal_moves.append((i,j))
                    drawPiece((i,j),3)
                else:
                    # if it is 0 and is not legal, make sure it is of bgcolor
                    drawPiece((i,j),0)

    return legal_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def possibilities(board):\n return board[np.where(board == 0)]", "def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (2, 2)),\n ((0, 2), (1, 1), (2, 0))\n )\n\n if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):\n return player\n\n return None", "def isSolved(board):\n for player in [1, 2]:\n if [player]*3 in chain(\n board, # Rows\n zip(board), # Columns\n [ # Diagonals\n [board[i][i] for i in range(len(board))],\n [board[len(board) - i - 1][i] for i in range(len(board))]\n ]\n ):\n return player\n return -1 if 0 in chain(*board) else 0", "def get_possible_moves(board):\n\tpossible_moves = []\n\n\tfor count, player in enumerate(board):\n\t\tif player is not server_player and player is not user_player:\n\t\t\tpossible_moves.append(count)\n\n\treturn possible_moves", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0", "def player_choices(self, player):\n player_choices = []\n for i in range(self.quadrants_count):\n quadrant_board = self.play_area[i].get_board()\n for j in range(self.quadrant_positions_count):\n if quadrant_board[j] == player:\n position = j + 1 + i * 9\n player_choices.append(position)\n return player_choices", "def player(board):\n\n count_x = 0\n count_o = 0\n for i in board:\n for j in i:\n if (j == X):\n count_x += 1\n elif (j == O):\n count_o += 1\n if (count_x <= count_o):\n return X\n else:\n return O", "def player(board):\n num_x = sum([list.count(X) for list in board])\n num_o = sum([list.count(O) for list in board])\n if num_x == num_o:\n return X\n else:\n return O", "def player(board):\n\n if terminal(board):\n return 7\n\n numX = 0\n numO = 0\n\n for i in board:\n for j in i:\n if j == X:\n numX = numX + 1\n elif j == O:\n numO 
= numO + 1\n\n if numX == numO:\n return X\n else:\n return O", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def player(board):\n total = 0\n for i in range(len(board)):\n for j in range(len(board)):\n total = total + utility_map[board[i][j]]\n\n # If they cancel out then equal number so X's turn\n if total == 0:\n return X\n else:\n return O", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def player(board):\n X_count = 0\n O_count = 0\n\n for row in board:\n X_count += row.count(X)\n O_count += row.count(O)\n\n if X_count <= O_count:\n return X\n else:\n return O", "def getPossibleMovesOutCheck(player, board):\n possibleMoves = []\n for col in board:\n for piece in col:\n if type(piece) != int and piece.player == player:\n pieceMoves = piece.availableMoves(board)\n piecePosX = piece.posx\n piecePosY = piece.posy\n for move in pieceMoves:\n copyBoard = copy.deepcopy(board)\n #Do the move on the copied board\n copyBoard[piecePosX][piecePosY] = 1\n newPiece = copy.deepcopy(piece)\n newPiece.posx = move[0]\n newPiece.posy = move[1]\n copyBoard[move[0]][move[1]] = newPiece\n #Check if the copied board is now in check...\n #if not then it is a valid move to do to get out of check\n if not isInCheck(player, copyBoard):\n possibleMoves.append(((piecePosX,piecePosY), move))\n\n return possibleMoves", "def player(board):\n moves = 0\n\n for row in range(len(board)):\n for column in range(len(board[row])):\n if board[row][column] != EMPTY:\n moves+=1\n\n if moves % 2 == 0:\n return X\n return O\n\n # raise NotImplementedError", "def player(board):\n count = 0\n for row in range(len(board)):\n for col in range(len(board[row])):\n if board[row][col] == X or board[row][col] == O:\n count += 1\n\n if count % 2 == 0:\n return X\n else:\n return O", "def player(board):\n n_x,n_y=0,0\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i][j]==X:\n n_x+=1\n elif board[i][j]==O:\n n_y+=1\n if n_x==n_y:\n return X\n elif n_x>n_y:\n return O\n raise NotImplementedError", "def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]", "def piecesGenerator(self,player):\n for row in range(8):\n for col in range(8):\n if self.board[row][col] != None:\n piece,pos = self.pieceAt((row,col)) ,((row,col))\n if piece['player'] == player:\n yield piece,pos", "def is_in_check(self, player):\n # List of coords in board\n col = ['a','b','c','d','e','f','g','h','i'] # the columns\n a = []\n for i in range(10):\n a.append([j + str(i+1) for j in col])\n \n # Flatten the 
list\n board_coords = []\n for sublist in a:\n for coord in sublist:\n board_coords.append(coord)\n \n # getting each object in the board for a player\n pieces_coords = []\n pieces_left = []\n for row in range(10):\n for column in range(9):\n if self.get_board()[row][column] is not None and self.get_board()[row][column].get_color() == player.upper():\n # pieces left on the board for the player\n pieces_coords.append((row, column))\n pieces_left.append(self.get_board()[row][column])\n \n p_b_coord = (pieces_coords, board_coords)\n \n counter = 0 \n for piece_coord in pieces_coords: \n for board_coord in board_coords: \n translated_index = self.column_to_letter(piece_coord[1]) + str(piece_coord[0]) \n piece = self.get_piece_type(translated_index)\n if piece is not None:\n if piece.check_legal(translated_index, board_coord, self.get_board(), self.get_game_state()) == True:\n counter += 1\n print(counter)\n if counter == 0:\n self._current_state = upper(player) + '_WON'\n return True \n return False", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def player(board):\n count = 0\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n if board[i][j] != EMPTY:\n count += 1\n if count % 2 == 0:\n player = X\n else:\n player = O\n return player\n\n #raise NotImplementedError", "def available_combinations(self):\n result = []\n\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 0:\n result.append((i, j))\n\n return result", "def count_chips(board, player):\n cont = 0\n for row in board:\n for col in row:\n if col == PLAYER_CHIPS[player]:\n cont += 1\n return cont", "def find_valid_posse(board: 'List') -> 'List':\n for i, a in enumerate(board):\n for j, b in enumerate(board):\n if j != i:\n for k, c in enumerate(board):\n if k not in (i, j) and \\\n is_valid_posse((a, b, c)):\n # print((i, j, k))\n return [a, b, c]", "def player(board):\n\n # Game is over\n if terminal(board):\n return None\n\n # Count number of occurences of X and O\n x_count = 0\n o_count = 0\n for row in board:\n for box in row:\n if box == X:\n x_count = x_count + 1\n elif box == O:\n o_count = o_count + 1\n # When move count is tied, X is next\n if x_count <= o_count:\n return X\n # When X has moved once more than O, next move is O\n else:\n return O", "def canter(board,player,possible_moves, pos, loc, i):\r\n \r\n next_piece = encode(loc+i) \r\n new_pos = loc + (i*2)\r\n \r\n if player == 1 and next_piece in board.white:\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc])\r\n if player == -1 and next_piece in board.black:\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc])", "def getWinner(board):\n players = [X, O]\n num_symbols_in_line = 3\n for player in players:\n # check rows\n for row in board:\n line_count = 
row.count(player)\n if line_count == num_symbols_in_line:\n return player\n \n # check columns\n for col_i in range(len(board[0])):\n line_count = 0\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top left to bottom right\n line_count = 0\n for vert_cell in range(len(board)):\n if board[vert_cell][vert_cell] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top right to bottom left\n line_count = 0\n col_i = len(board) - 1\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n col_i -= 1\n if line_count == num_symbols_in_line:\n return player\n\n return None", "def check_boards(self):\n succesful = True\n marker = self.game.player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != -10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n marker = self.game.ai_player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != 10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n \n tie_boards = [\n [ \n [\"O\",\"O\",\"X\"],\n [\"X\",\"O\",\"O\"],\n [\"X\",\"X\",\" \"]\n ],\n [\n [\"O\",\"X\",\" \"],\n [\" \",\"X\",\" \"],\n [\" \",\"O\",\" \"]\n ],\n [\n ['O', 'O', 'X'],\n ['X', 'X', 'O'],\n ['O', 'O', 'X']\n ]\n ]\n for board in tie_boards:\n if self.game.check_win_conditions(board) != 0:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n\n print(f\"-----Ending check_winning_boards-----\")", "def winner(board):\n chances = [X, O]\n for chance in chances:\n for row in range(3):\n if list(chance)*3 == board[row]:\n return chance\n for column in range(3):\n if [[chance] for i in range(3)] == [[board[row][column]] for row in range(3)]:\n return chance\n if board[0][0] == chance and board[1][1] == chance and board[2][2] == chance:\n return chance\n if board[0][2] == chance and board[1][1] == chance and board[2][0] == chance:\n return chance\n return None", "def show_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n poss = list(self.possibles[row][col])\n if poss:\n teil = 
qbwrdd.Tile(poss, self.board.scene)\n teil.cell = \"poss\"\n cell = row * self.board_size + col\n pos_x, pos_y = self.board.cells[cell].x(), self.board.cells[cell].y()\n if col % 3 > 0:\n pos_x += 2\n self.poss_tiles[row][col] = teil\n teil.draw_tile_at(pos_x, pos_y)", "def player(board):\n plays = 0\n\n # Count non-empty squares\n for i in range(3):\n for j in range(3):\n if board[i][j] != EMPTY:\n plays += 1\n\n # Even number of plays -> X's turn\n if plays % 2 == 0:\n return X\n else:\n return O", "def available_moves(board_state):\n for x, y in itertools.product(range(len(board_state)), range(len(board_state[0]))):\n if board_state[x][y] == 0:\n yield (x, y)", "def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves", "def player(board):\n if not terminal(board):\n cnt_x = 0\n cnt_o = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n continue\n elif board[i][j] == X:\n cnt_x += 1\n else:\n cnt_o += 1\n\n if cnt_x <= cnt_o:\n return X\n else:\n return O\n else:\n return None", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in 
range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = []\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)", "def winner(board):\n x_in_board = []\n o_in_board = []\n winning_positions = [\n [[0, 0], [0, 1], [0, 2]],\n [[1, 0], [1, 1], [1, 2]],\n [[2, 0], [2, 1], [2, 2]],\n [[0, 0], [1, 0], [2, 0]],\n [[0, 1], [1, 1], [2, 1]],\n [[0, 2], [1, 2], [2, 2]],\n [[0, 0], [1, 1], [2, 2]],\n [[0, 2], [1, 1], [2, 0]]\n ]\n\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == X:\n x_in_board.append([i, j])\n elif board[i][j] == O:\n o_in_board.append([i, j])\n\n for i in winning_positions:\n if i[0] in x_in_board and i[1] in x_in_board and i[2] in x_in_board:\n return X\n elif i[0] in o_in_board and i[1] in o_in_board and i[2] in o_in_board:\n return O\n\n return None", "def player(board):\n if terminal(board) == True:\n return None \n countO, countX = 0, 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n countX += 1\n elif board[i][j] == O:\n countO += 1\n if countO >= countX:\n return X\n else:\n return O", "def populate_positions(self, board, player):\n\t\tprint \"Inside populate_positions!-----\"\n\t\tpositions = []\n\t\tfor i in range(0,3):\n\t\t\tfor j in range(0,3):\n\t\t\t\tif(board[i][j] == player):\n\t\t\t\t\tpositions.append([i,j])\n\t\treturn positions", "def heuristic_takeAllPiece(board, player):\n\n if player is board._WHITE:\n return board._nbWHITE - board._nbBLACK\n \n return board._nbBLACK - board._nbWHITE", "def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X", "def getPlayer(board):\n count_x, count_o = 0, 0\n for row in board:\n count_x += 
row.count(X)\n count_o += row.count(O)\n if count_x > count_o:\n return O\n return X", "def _is_winning_combination(board, combination, player):\n\n \"\"\"\n ### Code before refactoring into a comprehension list:\n\n for a_tuple in combination:\n\n # e.g. a_tuple = (0,0)\n # if board[0][0] != \"X\"\n if board[a_tuple[0]][a_tuple[1]] != player:\n\n return False\n \"\"\"\n\n if any(a_tuple for a_tuple in combination if board[a_tuple[0]][a_tuple[1]] != player):\n return False\n\n return True", "def player(board):\n\tif board == initial_state():\n\t\treturn X\n\n\tnumX=0\n\tnumO=0\n\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif(board[i][j]==X):\n\t\t\t\tnumX+=1\n\t\t\telif(board[i][j]==O):\n\t\t\t\tnumO+=1\n\n\tif numX > numO:\n\t\treturn O\n\telse:\n\t\treturn X", "def won(self):\n for y in range(self.size):\n winning = []\n for x in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n for x in range(self.size):\n winning = []\n for y in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = self.size-1-y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n return None", "def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement 
inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot", "def winner(board):\n # finite list of possible wins\n winnings = [\n (0, 0), (0, 1), (0, 2), \n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n (0, 0), (1, 0), (2, 0),\n (0, 1), (1, 1), (2, 1),\n (0, 2), (1, 2), (2, 2),\n (0, 0), (1, 1), (2, 2),\n (2, 0), (1, 1), (0, 2)\n ]\n # if the board has one of the lists in winnings \n # then the piece in one of those spots is the winner\n xcount = 0\n ocount = 0\n for i in range(len(winnings)):\n if(board[winnings[i][0]][winnings[i][1]] == X):\n xcount += 1\n if(board[winnings[i][0]][winnings[i][1]] == O):\n ocount += 1\n if((i + 1) % 3 == 0):\n if(ocount == 3 or xcount == 3):\n return board[winnings[i][0]][winnings[i][1]]\n else:\n ocount = 0\n xcount = 0\n return EMPTY", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def player(board):\n if board == initial_state():\n return X\n \n # if board has lesser or eual X(s) than O(s)\n if sum([row.count(X) for row in board]) <= sum([row.count(O) for row in board]):\n return X\n else:\n return O", "def check_board(board_state, player_symbol, display_message = False):\n\n 
is_board_completely_filled = board_state.isalpha()\n\n indices_set = set([ind+1 for ind, val in enumerate(board_state) if val == player_symbol])\n\n if {1, 2, 3}.issubset(indices_set) or {4, 5, 6}.issubset(indices_set) or {7, 8, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Row completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if {1, 4, 7}.issubset(indices_set) or {2, 5, 8}.issubset(indices_set) or {3, 6, 9}.issubset(indices_set):\n\n if display_message:\n print(\"Column completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n if {1, 5, 9}.issubset(indices_set) or {3, 5, 7}.issubset(indices_set):\n\n if display_message:\n print(\"Diagonal completed..!!!\")\n print(\"Player \"+player_symbol+\" won the game.\")\n\n return True\n\n if is_board_completely_filled:\n\n if display_message:\n print(\"Game is drawn...!\")\n\n return \"Draw\"\n\n return False", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def player(board):\r\n if terminal(board):\r\n return 
\"X\"\r\n \r\n count_X = 0\r\n count_O = 0\r\n for row in board:\r\n for col in row:\r\n if col == \"X\":\r\n count_X += 1\r\n elif col == \"O\":\r\n count_O += 1\r\n \r\n if count_X > count_O:\r\n return \"O\"\r\n else:\r\n return \"X\"\r\n \r\n raise NotImplementedError", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None", "def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result", "def check_combo(self, matrix, player=None):\n if player is None:\n player = self.current_player\n \n if self.N * player in np.sum(matrix, axis=0):\n return player\n if self.N * player in np.sum(matrix, axis=1):\n return player\n if np.sum(matrix.diagonal()) == self.N * player:\n return player\n if np.sum(np.fliplr(matrix).diagonal()) == self.N * player:\n return player\n return 0", "def player(board):\n turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] != EMPTY:\n turn+=1\n if turn % 2 != 0:\n return O\n else:\n return X", "def capture(board,player,possible_moves, pos, loc, i):\r\n \r\n if player == 1 and encode(loc+i) in board.black:\r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2)) \r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n if player == -1 and encode(loc+i) in board.white: \r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2))\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n return False", "def winFor(self,player):\n if(self.cachedWin == False):\n won = False;\n if(player==WHITE):\n for x in range(0,WIDTH):\n if(self.gameState[x,0]==WHITE):\n won = True\n \n elif(player==BLACK):\n for x in range(0,WIDTH):\n if(self.gameState[x,HEIGHT-1]==BLACK):\n won = True\n \n if(len(self.successors()) == 0):#IF there are no available moves for both players\n bCount = self.count(BLACK) #check who has the most pawns\n wCount = self.count(BLACK)\n if(bCount>wCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n if(wCount>bCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n \n if(won):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n else:\n return False\n else:\n return player == self.cachedWinner", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n 
possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def player(board):\n X_count = 0\n O_count = 0\n #to determine the turn, I will make a count of the X and O tokens on the board\n for row in board:\n #I create a dictionary with the count on each row\n player_turns = {i: row.count(i) for i in row}\n #I check if I have X and O tokens in the row, if not, create an entry with 0\n if not (player_turns.get(\"X\")):\n player_turns['X'] = 0\n if not player_turns.get(\"O\"):\n player_turns['O'] = 0\n #I add to my counter the total amount of tokens found for each player in this row\n X_count = X_count + int(player_turns['X'])\n O_count = O_count + int(player_turns['O'])\n\n #if X has the same amount of tokens than O, it means it is X's turn\n if(X_count == O_count):\n #It should be X's turn. \n return \"X\"\n #Otherwise, it is O's turn.\n elif(X_count>O_count):\n #it is O's turn.\n return \"O\"", "def actions(board):\n if terminal(board):\n return 7\n\n possible = {}\n possible = set()\n\n x = 0\n for i in board:\n y = 0\n for j in i:\n if j == EMPTY:\n index = (x, y)\n possible.add(index)\n y = y+1\n x=x+1\n\n return possible", "def get_possibles_moves(board: numpy.ndarray) -> List[Move]:\n return [tuple(k) for k in numpy.argwhere(board == -1) if 0 != k[0] != board.shape[0] - 1 != k[1] != 0]", "def player(board):\n x = 0\n o = 0\n for row in board:\n for m in row:\n if m == \"X\":\n x += 1\n if m == \"O\":\n o += 1\n return \"O\" if x > o else \"X\"", "def poss_by_block(self):\n block_horiz, block_vert = self.board_size // self.c_size, self.board_size // self.r_size\n lpos = [self.c_size * no for no in range(block_horiz)]\n vpos = [self.r_size * no for no in range(block_vert)]\n row_poss, col_poss = [], []\n for row in range(self.board_size):\n poss = self.possibles[row]\n poss = [poss[x:x + self.r_size] for x in lpos]\n poss = [{d for subset in p for d in subset} for p in poss]\n row_poss.append(poss)\n for col in range(self.board_size):\n poss = [self.possibles[row][col] for row in range(self.board_size)]\n poss = [poss[x:x + self.c_size] for x in vpos]\n poss = [{d for subset in p for d in subset} for p in poss]\n col_poss.append(poss)\n return row_poss, col_poss", "def player(board):\n xcount, ocount = 0, 0\n for row in board:\n xcount += row.count(X)\n ocount += row.count(O)\n if xcount > ocount:\n return O\n elif xcount == 0 and ocount == 0:\n return X\n elif xcount == ocount:\n return X", "def winner(board):\n \n possible_wins = []\n row1 = board[0]\n row2 = board[1]\n row3 = board[2]\n col1 = [board[0][0],board[1][0],board[2][0]]\n col2 = [board[0][1],board[1][1],board[2][1]]\n col3 = [board[0][2],board[1][2],board[2][2]]\n diag1 = [board[0][0],board[1][1],board[2][2]]\n diag2 = [board[2][0],board[1][1],board[0][2]]\n \n possible_wins.append(row1)\n possible_wins.append(row2)\n possible_wins.append(row3)\n possible_wins.append(col1)\n possible_wins.append(col2)\n possible_wins.append(col3)\n possible_wins.append(diag1)\n possible_wins.append(diag2)\n \n for trait in possible_wins:\n if trait.count(\"X\") == 3:\n return \"X\"\n elif trait.count(\"O\") == 3:\n return \"O\"\n \n return None", "def get_all_pieces(self, player):\n pieces = []\n for row in range(constant.BOARD_DIMENSION):\n for col in range(constant.BOARD_DIMENSION):\n piece = self.get_piece((row, col))\n if piece is not None and piece.get_player() is player:\n pieces.append(piece)\n return pieces", "def test_winner(state_board):\n\tres = 3 
#default value is tie game\n\tptsb = 0 #points for the black\n\tptsw = 0 #points for the white\n\t\n\t#looks in the board if there is an empty case while\n\t# counting the number of points for each player\n\tfor i in state_board:\n\t\tfor j in i:\n\t\t\tif j == 0:\n\t\t\t\tres = 0\n\t\t\telif j == 1:\n\t\t\t\tptsb += 1\n\t\t\telif j == 2:\n\t\t\t\tptsw += 1\n\t\n\t#if there is an empty case, looks for possibilities\n\t# for the other player, if no possibility test for the points\n\t#if no empty case\n\t# test for points\n\t#else return 0\n\tif res == 0:\n\t\tif possible(state_board,1) == []:\n\t\t\tif possible(state_board,2) == []:\n\t\t\t\tres = count_points(ptsb,ptsw)\n\t\t\telse:\n\t\t\t\tres = 5\n\t\telif possible(state_board,2) == []:\n\t\t\tres = 4\n\telse:\n\t\tres = count_points(ptsb,ptsw)\n\treturn res", "def get_groups(board: numpy.ndarray, player: int) -> List[Group]:\n # Generate couples\n # Array of (p1, p2, x) where x = -1 if p1 == p2, 0 if p1 and p2 are close and 1 if they are close\n couples = []\n size = board.shape[0]\n for i in range(1, size - 1):\n for j in range(1, size - 1):\n if board[i, j] == player:\n l0 = [(i + x, j + y) for x, y in NEIGHBORS_1]\n l1 = [(i + x, j + y) for x, y in NEIGHBORS_2]\n for p in l0 + l1 + [(i, j)]:\n corner = all([x in [0, size - 1] for x in p])\n if 0 <= p[0] < size and 0 <= p[1] < size and board[p] == player and not corner:\n if p == (i, j):\n couples.append(((i, j), p, -1))\n elif p in l0:\n couples.append(((i, j), p, 0))\n else:\n p1, p2 = get_common_neighbours((i, j), p)\n if player not in [board[p1], board[p2]] and (board[p1] == -1 and board[p2] == -1):\n couples.append(((i, j), p, 1))\n\n # Group couples\n groups = [[k] for k in couples]\n\n def fusion(f_groups):\n for group1 in f_groups:\n for group2 in f_groups:\n if group1 != group2:\n for c1 in group1:\n for c2 in group2:\n if c1[0] == c2[0] or c1[0] == c2[1] or c1[1] == c2[0] or c1[1] == c2[1]:\n group1.extend(group2)\n f_groups.remove(group2)\n return True\n return False\n\n while fusion(groups):\n pass\n\n return groups", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def winner(board):\n # return 0[[0EMPTY, 1EMPTY, 2EMPTY],\n # 1[EMPTY, EMPTY, EMPTY],\n # 2[EMPTY, EMPTY, EMPTY]]\n # Check columns\n if board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n # Check rows\n elif all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n # Check diagonals\n elif board[0][0] == board[1][1] and 
board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board [0][2]\n else:\n return None", "def get_winner(board):\n\n def who_won(in_a_row, board_size, cur_player):\n \"\"\" \n a function private to get_winner() (yes you can do this. Cool huh!?) \n that tells get_winner if it has a winner \n \"\"\"\n if in_a_row == board_size:\n return 1 if cur_player == 'X' else 2\n else:\n return 0\n\n def test_row_col(board, rows):\n \"\"\" private function to test the rows and columns \"\"\"\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n def test_diagonal(board, normal):\n \"\"\" private function to test the two diagonals \"\"\"\n cur_player = board[0][0] if normal else board[0][len(board)-1]\n in_a_row = 0\n for i in range(len(board)):\n symbol = board[i][i] if normal else board[i][len(board)-1-i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1 \n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n\n # test rows\n winner = test_row_col(board, True)\n if not winner == 0:\n return winner\n\n # test cols\n winner = test_row_col(board, False)\n if not winner == 0:\n return winner\n\n # test diagonal from top left to bottom right\n winner = test_diagonal(board, True)\n if not winner == 0:\n return winner\n\n # test diagonal from top right to bottom left\n winner = test_diagonal(board, False)\n if not winner == 0:\n return winner\n\n return 0", "def check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True", "def find_possible_moves(self, board, self_color):\r\n possible_moves = []\r\n delta = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]\r\n\r\n for r in range(len(board)):\r\n for c in range(len(board[r])):\r\n if board[r][c] == self_color:\r\n for i in range(0, 8):\r\n coords = (r, c)\r\n\r\n found_move = self.check_moves(board, self_color, coords, delta[i])\r\n\r\n if found_move is not None and found_move not in possible_moves:\r\n possible_moves.append(found_move)\r\n return possible_moves", "def look_for_win(self, board, player=None):\n\n win_spot = None\n if player is None:\n player = self\n\n for group in WINS:\n # creates a list of just the elements of the board which are\n # part of a specific win group and and not already owned by the player\n # and creates a list of tuples 
of the element and its value.\n not_mine = [(i, val) for i, val in enumerate(board.tttboard)\n if i in group\n and val != player.board_value]\n\n # If there's only one not owned by the ai player and not owned by\n # the other player then select it and we've won\n if len(not_mine) == 1 and not_mine[0][1] is None:\n # Maybe this should return the selection rather than\n # modifying the board in here. Decide later.\n win_spot = not_mine[0][0]\n break\n\n return win_spot", "def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list", "def player(board):\n x_turn = 0\n o_turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_turn += 1\n elif board[i][j] == O:\n o_turn += 1\n if x_turn == 0 and o_turn == 0:\n return X\n elif x_turn > o_turn:\n return O\n elif x_turn == o_turn:\n return X\n return X", "def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0", "def checkForWin(self, board, player):\n\t\tif ((board[0][0] == player and board[0][1] == player and board[0][2] == player) or\n\t\t\t(board[1][0] == player and board[1][1] == player and board[1][2] == player) or\n\t\t\t(board[2][0] == player and board[2][1] == player and board[2][2] == player) or\n\t\t\t(board[0][0] == player and board[1][1] == player and board[2][2] == player) or\n\t\t\t(board[0][2] == player and board[1][1] == player and board[2][0] == player) or\n\t\t\t(board[0][0] == player and board[1][0] == player and board[2][0] == player) or\n\t\t\t(board[0][1] == player and board[1][1] == player and 
board[2][1] == player) or\n\t\t\t(board[0][2] == player and board[1][2] == player and board[2][2] == player)):\n\t\t\tprint(\"----------------------------\")\n\t\t\tprint(\"Yay! Player%d is the winner!\" % player)\n\t\t\tprint(\"----------------------------\")\n\t\t\tself.win = player", "def player(board):\n number_of_X = 0\n number_of_O = 0\n \n for row in board: \n number_of_X += row.count(\"X\")\n number_of_O += row.count(\"O\")\n \n \n if number_of_X <= number_of_O:\n return(\"X\") \n else: \n return(\"O\")", "def heuristic(self):\r\n # 1.\r\n blacks, whites = 0, 0\r\n weights = [0 for _ in range(6)]\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n user_dir = directions[:2] if self.current_player == 'n' else directions[2:]\r\n for i in range(8):\r\n for j in range(8):\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n if self.matrix[i][j] == self.current_player or self.matrix[i][j] == self.current_player.upper():\r\n\r\n # numarul de piese rege\r\n if self.matrix[i][j] == self.current_player.upper():\r\n weights[1] += 7.75\r\n\r\n # numarul de piese normale\r\n else:\r\n weights[0] += 5\r\n\r\n # numarul de piese de pe baseline in functie de tipul de piesa\r\n # conform strategiilor de joc este o strategie buna sa ai cat mai multe\r\n # piesa pe baseline pentru a preveni creare de piese de tip rege ale adversarului\r\n if self.current_player in ['n', 'N']:\r\n if i == 7:\r\n weights[2] += 4\r\n elif self.current_player in ['a', 'A']:\r\n if i == 0:\r\n weights[2] += 4\r\n\r\n # numarul de piese din mijlocul tablei\r\n # la fel este o strategie buna pentru atac\r\n if 3 <= i <= 4 and 3 <= j <= 4:\r\n weights[3] += 2\r\n\r\n # numar piese vulnerabile\r\n # adica piese ce pot fi capturate de oponent la urmatoare tura\r\n for d in user_dir:\r\n\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n weights[4] -= 3\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n # daca elimin o piesa rege este o mutare mai buna\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[5] += 10\r\n else:\r\n weights[5] += 7\r\n\r\n diff = (blacks - whites) if self.current_player == 'n' else (whites - blacks)\r\n # cand sunt mai putin piese, AI adopta o tactica mai ofensiva\r\n if blacks + whites <= 10:\r\n return sum(weights) + diff\r\n return sum(weights)", "def determine_winners(self, players=None):\n players_and_cards = [(holding.player.id, holding.codes) for holding in self.live_holdings]\n if players:\n player_ids = [p.id for p in players]\n players_and_cards = [d for d in players_and_cards if d[0] in player_ids]\n winners = determine_winners(players_and_cards, self.board.codes)\n return [Player.query.get(winner) for winner in winners]", "def winner(board):\r\n\r\n #rows:\r\n if (board[0][0] == board[0][1] == board[0][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[1][0] == board[1][1] == board[1][2]) and (board[1][0] == \"X\" or board[1][0] == \"O\"):\r\n return board[1][0]\r\n if (board[2][0] == board[2][1] == board[2][2]) and (board[2][0] == \"X\" or board[2][0] == \"O\"):\r\n return 
board[2][0]\r\n\r\n #columns\r\n if (board[0][0] == board[1][0] == board[2][0]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][1] == board[1][1] == board[2][1]) and (board[0][1] == \"X\" or board[0][1] == \"O\"):\r\n return board[0][1]\r\n if (board[0][2] == board[1][2] == board[2][2]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n\r\n #diagonals\r\n if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n \r\n return None\r\n\r\n raise NotImplementedError", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else --> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = 
imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def possible_moves(self, piece):\n def _index(orig, off):\n \"\"\"Helper function to find the new index.\"\"\"\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)\n\n p_x, p_y = piece\n p_i = _index(piece, (0, 0))\n\n # pass a list of the four corners first for basic possibles\n move_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for i in [-1, 1] for j in [-1, 1]]\n possibles = self.squares[p_i].can_move(piece, move_land)\n\n # next append the new list from jumps\n jump_land = [((p_x + i, p_y + j), self.squares[_index(piece, (i, j))])\\\n for j in [-2, 2] for i in [-2, 2]]\n possibles += self.squares[p_i].can_jump(piece, move_land, jump_land)\n\n # clean out the list of duplicates, although there should be none\n return [m for i, m in enumerate(possibles) if m not in possibles[:i]]", "def tournament_selection(population, board):\n t = len(population)\n best = replace(np.random.choice(population), board)\n for _ in (1, t):\n beside = replace(np.random.choice(population), board)\n if len(beside) < len(best):\n best = beside\n return best, beside", "def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection 
completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask", "def get_all_moves(board, player):\n moves = []\n if not (player_has_won(board, player) or\n player_has_won(board, utils.get_opponent(player)) or\n (not is_valid_board(board))):\n for index in range(9):\n if board[index] == config.NO_PLAYER:\n moves += [index]\n return moves", "def valid_attempt(board):\n for i in range(n):\n if [] in board[i]:\n return 0\n return 1", "def is_valid_board(self):\n total = sum(range(1, self.n+1))\n d = {x : [set([]), set([])] for x in range(1, self.n+1)}\n for row_index in range(self.n):\n for col_index in range(self.n):\n num = self.board[row_index][col_index]\n try:\n if row_index in d[num][0] or col_index in d[num][1]:\n print(\"Invalid solution.\")\n return\n except KeyError:\n print(\"Unsolved solution.\") # d[0]\n return\n\n d[num][0].add(row_index)\n d[num][1].add(col_index)\n print(\"Valid solution!\")", "def winner(board):\n columns = []\n for row in board:\n xcount = row.count(X)\n ocount = row.count(O)\n if xcount == 3:\n return X\n if ocount == 3:\n return O\n\n for j in range(len(board)):\n column = [row[j] for row in board]\n columns.append(column)\n \n for j in columns:\n xcounter = j.count(X)\n ocounter = j.count(O)\n if xcounter == 3:\n return X\n if ocounter == 3:\n return O\n \n if board[0][0] == O and board[1][1] == O and board[2][2] == O:\n return O\n if board[0][0] == X and board[1][1] == X and board[2][2] == X:\n return X\n if board[0][2] == O and board[1][1] == O and board[2][0] == O:\n return O\n if board[0][2] == X and board[1][1] == X and board[2][0] == X:\n return X\n\n return None", "def solve(self, board: List[List[str]]) -> None:", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] 
== \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' 
+ l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def test_find(self):\n n = 3\n grid = ['p--', '---', '--m']\n players = princess2.findPrincess(n, grid)\n self.assertEqual(players, [0, 0])", "def generate_options(board: list, player_turn: chr):\n black_marbles, white_marbles = Board.read_marbles(board)\n black_risk, white_risk = Evaluator.assess_risk(black_marbles, white_marbles)\n if player_turn == 'b':\n risk = black_risk\n else:\n risk = white_risk\n\n board_object = Board()\n moves, resulting_boards = board_object.generate_all_boards(board, player_turn)\n # Assume score 0 for now\n Evaluator.score_move(moves, 0)\n for i in range(0, len(moves)):\n Evaluator.calculate_board_score(moves[i], resulting_boards[i], player_turn, risk)\n return moves, resulting_boards", "def winner(board):\n for i in (O, X):\n for j in range(3):\n if (board[j][0] == i and board[j][1] == i and board[j][2] == i):\n return i\n if (board[0][j] == i and board[1][j] == i and board[2][j] == i):\n return i\n if (board[0][0] == i and board[1][1] == i and board[2][2] == i):\n return i\n if (board[2][0] == i and board[1][1] == i and board[0][2] == i):\n return i\n return None", "def get_all_game_pieces_potential_moves(self):\n\n board = self.get_board()\n\n for row in board:\n\n for column in row:\n\n if column is not None:\n\n print(column.get_label(), ': ' , column.get_potential_moves())" ]
[ "0.70995724", "0.68939847", "0.6879628", "0.6865679", "0.6784905", "0.67557615", "0.6732344", "0.6731795", "0.6702802", "0.6696928", "0.6610854", "0.6586503", "0.6561319", "0.6559989", "0.6543688", "0.6528893", "0.6524956", "0.65147173", "0.6503424", "0.6475118", "0.64386654", "0.6423474", "0.6415878", "0.6410982", "0.6400801", "0.63837606", "0.63732636", "0.63594353", "0.6355702", "0.6354126", "0.63507843", "0.6333972", "0.6320734", "0.6317489", "0.6312033", "0.63082993", "0.63051695", "0.63044214", "0.6301482", "0.6296824", "0.62957007", "0.6277561", "0.62771165", "0.6276145", "0.6273107", "0.62630117", "0.62574714", "0.6252719", "0.62469494", "0.6242944", "0.6238361", "0.6236791", "0.62338066", "0.62302995", "0.6226558", "0.62259275", "0.6207201", "0.62035733", "0.62018245", "0.6198512", "0.61841476", "0.6180825", "0.6171909", "0.6170065", "0.6167974", "0.61670923", "0.6153379", "0.6137793", "0.61281407", "0.61278737", "0.61127627", "0.611265", "0.6107991", "0.61032677", "0.60943776", "0.60934776", "0.6086602", "0.6071883", "0.60705477", "0.60688215", "0.60634124", "0.60605633", "0.6056638", "0.60485613", "0.60431546", "0.6037228", "0.60333705", "0.60310346", "0.6019026", "0.60127914", "0.6009768", "0.6007129", "0.60059404", "0.6004036", "0.60030603", "0.5993848", "0.59924746", "0.5992055", "0.5989728", "0.5984709" ]
0.6201891
58
This function finds the flipped pieces and returns them.
def flipper(pos, coul, state_board):
    """Return the opponent pieces that would be flipped by playing colour `coul` at `pos`."""
    tflips = []
    for i in range(-1,2): # -1 to 1
        for j in range(-1,2): # -1 to 1
            for k in range(1,9): # 1 to 8
                if state_board[pos[0]+i*k][pos[1]+j*k] == 0 or state_board[pos[0]+i*k][pos[1]+j*k] == -1: # if the square is empty or out of bounds
                    break
                elif state_board[pos[0]+i*k][pos[1]+j*k] == coul: # if it is the same color
                    if k > 1: # if it is not directly next to pos
                        for h in range(1,k): # add all the pieces in between to tflips
                            if [pos[0]+i*h,pos[1]+j*h] not in tflips: # get rid of duplicates
                                tflips.append([pos[0]+i*h,pos[1]+j*h])
                    else:
                        break
    return tflips
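A minimal usage sketch of flipper follows, under assumptions of mine rather than the source: the board is a 10x10 grid whose outer ring holds the -1 out-of-bounds sentinel the comments refer to, 0 marks an empty square, and the two colours are encoded as 1 and 2. The helper make_board and the concrete coordinates are illustrative only.

# Hedged usage sketch -- the 10x10 layout, the -1 sentinel border and the
# colour codes 1/2 are assumptions, not conventions fixed by the source.
EMPTY, OUT_OF_BOUNDS = 0, -1

def make_board():
    # 8x8 playing area (rows/cols 1..8) framed by a one-cell sentinel border
    board = [[OUT_OF_BOUNDS] * 10 for _ in range(10)]
    for r in range(1, 9):
        for c in range(1, 9):
            board[r][c] = EMPTY
    return board

board = make_board()
board[4][5] = 2  # opponent piece
board[4][6] = 1  # own piece closing the line to the right

# Playing colour 1 at (4, 4) should flip the opponent piece at (4, 5).
print(flipper([4, 4], 1, board))  # expected output: [[4, 5]]

Because the scan breaks at the first empty or sentinel square it meets, a one-cell border is enough to keep the index arithmetic inside the 10x10 list even though k ranges up to 8.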
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_flip_piece():\n board = Board(640, 640, 8)\n board.start_game()\n board.gm.flip_pieces = [(3, 3)]\n current_color = board.game_pieces[3][3].color\n board.flip_pieces()\n assert board.game_pieces[3][3].color != current_color\n \n board.gm.flip_pieces = [(3, 4)]\n current_color = board.game_pieces[3][4].color\n board.flip_pieces()\n assert board.game_pieces[3][4].color != current_color", "def find_flippable_disks(self, board, self_color, coords, delta):\r\n found_opponent = False\r\n flip_positions = []\r\n for i in range(1, 8):\r\n dr = coords[0] + i * delta[0]\r\n dc = coords[0] + i * delta[1]\r\n\r\n if self.is_on_board(dr, dc):\r\n if board[dr][dc] == self.EMPTY:\r\n break\r\n elif board[dr][dc] == self.get_opponent_color(self_color):\r\n found_opponent = True\r\n flip_positions.append((dr, dc))\r\n elif board[dr][dc] == self_color:\r\n if found_opponent:\r\n return flip_positions\r\n else:\r\n break", "def extract_move_information(game, board_flip):\n # init arrays for return\n material_1, material_2 = [], []\n board_maps = []\n board_positions_1 = []\n board_positions_2 = []\n\n # get current board\n board = game.board()\n\n lastmove_is_even = True\n # do moves in the current board\n for index, move in enumerate(game.mainline_moves()):\n if board_flip:\n board = board.mirror()\n\n if index % 2 != 0 : \n # helper variable, see below\n lastmove_is_even = False\n\n # get materials from the first move\n material_1.append(get_player_material(board)[0])\n\n # get board map for convolutions\n board_maps_1, board_maps_2 = get_board_map(board)\n board_maps_WB = np.concatenate((board_maps_1, board_maps_2))\n board_maps.append(board_maps_WB)\n \n # get board pieces and positions\n board_positions_1.append(get_board_positions_pieces_OHE(board))\n\n if index % 2 == 0 : \n # helper variable, see below\n lastmove_is_even = True\n\n # get materials from the first move\n material_2.append(get_player_material(board)[1])\n\n # get board pieces and positions\n board_positions_2.append(get_board_positions_pieces_OHE(board))\n \n # mirror board to be able to push next move\n board = board.mirror()\n else: \n if index % 2 == 0 : \n # helper variable, see below\n lastmove_is_even = False\n\n # get materials from the first move\n material_1.append(get_player_material(board)[0])\n\n # get board map for convolutions\n board_maps_1, board_maps_2 = get_board_map(board)\n board_maps_WB = np.concatenate((board_maps_1, board_maps_2))\n board_maps.append(board_maps_WB)\n \n # get board pieces and positions\n board_positions_1.append(get_board_positions_pieces_OHE(board))\n\n if index % 2 != 0 : \n # helper variable, see below\n lastmove_is_even = True\n\n # get materials from the first move\n material_2.append(get_player_material(board)[1])\n\n # get board pieces and positions\n board_positions_2.append(get_board_positions_pieces_OHE(board))\n\n # push to the next move\n board.push(move)\n \n if lastmove_is_even:\n if board_flip:\n board = board.mirror()\n # get materials from the first move\n material_2.append(get_player_material(board)[1])\n\n return material_1, material_2, board_positions_1, board_positions_2, board_maps", "def _label_flips(self, i_row, i_col, direction):\n vert_move, hori_move = i_row, i_col #Initially start at the opposing cell\n candidates = []\n\n #Perhaps could have done if 0 > vert_move > num_rows and 0 > hori_move > num_cols instead!\n while ((self._board[vert_move][hori_move] != self._turn) and not #This can be True in one of two ways! 
\n self._is_dead_end(vert_move, hori_move, direction) and # think: \"W\" or \" \"\n self._board[vert_move][hori_move] != \" \"):\n candidates.append((vert_move, hori_move))\n if direction == \"n\":\n vert_move += 1\n elif direction == \"ne\":\n hori_move -= 1\n vert_move += 1\n elif direction == \"e\":\n hori_move -= 1\n elif direction == \"se\":\n hori_move -= 1\n vert_move -= 1\n elif direction == \"s\":\n vert_move -= 1\n elif direction == \"sw\":\n hori_move += 1\n vert_move -= 1\n elif direction == \"w\":\n hori_move += 1\n elif direction == \"nw\":\n hori_move += 1\n vert_move += 1\n #Watch out, index can go out of range after several iterations\n #of the loop body, not just once you enter the loop!!!\n\n ending_cell = self._board[vert_move][hori_move] \n if ending_cell == self._turn: #If the ending cell is same color, then flip can be done.\n return (True, candidates)\n else:\n return (False, [])", "def test_flip_loop2():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (5,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (5,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def recognize_pieces(edges, v, squares):\n\n pieces = []\n\n v = cv2.equalizeHist(v)\n for p1, p2 in squares:\n # count the number of slightly centered edges\n occupancy = sum(edges[y][x]\n for x in range(p1.x + 5, p2.x - 5)\n for y in range(p1.y + 5, p2.y - 5))\n\n if occupancy > 70*255:\n corners = (v[p1.y][p1.x], v[p1.y][p2.x],\n v[p2.y][p1.x], v[p2.y][p2.x])\n\n # average v-component of the corners\n avg = sum(map(float, corners)) / len(corners)\n\n # black pixels should be relatively black\n # when compared to the corner average\n black = sum(v[y][x] / avg < 0.2\n for x in range(p1.x, p2.x + 1)\n for y in range(p1.y, p2.y + 1))\n\n if black >= 1000 and black != 1049:\n color = \"B\"\n else:\n color = \"W\"\n\n pieces.append(color)\n else:\n pieces.append(None)\n\n return pieces", "def get_swap_pairs(channels):\n swap_pairs = []\n if ('EEG FP1' in channels) and ('EEG FP2' in channels):\n swap_pairs.append((channels.index('EEG FP1'), channels.index('EEG FP2')))\n if ('EEG Fp1' in channels) and ('EEG Fp2' in channels):\n swap_pairs.append((channels.index('EEG Fp1'), channels.index('EEG Fp2'))) \n if ('EEG F3' in channels) and ('EEG F4' in channels):\n swap_pairs.append((channels.index('EEG F3'), channels.index('EEG F4'))) \n if ('EEG F7' in channels) and ('EEG F8' in channels):\n swap_pairs.append((channels.index('EEG F7'), channels.index('EEG F8'))) \n if ('EEG C3' in channels) and ('EEG C4' in channels):\n swap_pairs.append((channels.index('EEG C3'), channels.index('EEG C4')))\n if ('EEG T3' in channels) and ('EEG T4' in channels):\n swap_pairs.append((channels.index('EEG T3'), channels.index('EEG T4')))\n if ('EEG T5' in channels) and ('EEG T6' in channels):\n swap_pairs.append((channels.index('EEG T5'), channels.index('EEG T6')))\n if ('EEG O1' in channels) and ('EEG O2' in channels):\n swap_pairs.append((channels.index('EEG O1'), channels.index('EEG O2')))\n \n return swap_pairs", "def _get_flips(self, origin, direction, color):\n #initialize variables\n flips = [origin]\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n #print(x,y)\n if self[x][y] == 0:\n return []\n if self[x][y] == -color:\n flips.append((x, y))\n elif self[x][y] == color and len(flips) > 0:\n #print(flips)\n return flips\n\n return []", "def test_flip_loop():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), 
(3.8,2), (4,0), (6,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (6,5), (4,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def get_flip_paris(self):\n # the same names in union and actual\n flip_pair_names = [['rank', 'lank'], ['rkne', 'lkne'], ['rhip', 'lhip'],\n ['rwri', 'lwri'], ['relb', 'lelb'], ['rsho', 'lsho']]\n union_keys = list(self.union_joints.keys())\n union_values = list(self.union_joints.values())\n\n flip_pairs = [[union_keys[union_values.index(name)] for name in pair] for pair in flip_pair_names]\n return flip_pairs", "def _discover_move(self, origin, direction):\n x, y = origin\n color = self[x][y]\n flips = []\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n if self[x][y] == 0:\n if flips:\n # print(\"Found\", x,y)\n return (x, y)\n else:\n return None\n elif self[x][y] == color:\n return None\n elif self[x][y] == -color:\n # print(\"Flip\",x,y)\n flips.append((x, y))", "def partOne(tileList):\n\n blackTiles = set()\n directionsDict = {\n \"e\" : (0, 1),\n \"se\": (-0.5, 0.5),\n \"sw\": (-0.5, -0.5),\n \"w\": (0, -1),\n \"nw\": (0.5, -0.5),\n \"ne\": (0.5, 0.5),\n }\n\n for tile in tileList:\n\n #print(f\"tile: {tile}\")\n index = 0\n currentPosition = (0, 0)\n while index < len(tile):\n\n direction = tile[index]\n\n if direction in [\"s\", \"n\"]:\n index += 1\n direction += tile[index]\n\n #print(f\"Direction: {direction}\")\n currentPosition = tuple(map(add, currentPosition, directionsDict[direction]))\n index += 1\n #print(f\"currentPosition: {currentPosition}\")\n if currentPosition in blackTiles:\n blackTiles.remove(currentPosition)\n else:\n blackTiles.add(currentPosition)\n\n return blackTiles", "def part2(puzzle_input):\n puzzle_input_arr = puzzle_input.split('\\n')\n visited_squares = {} # (x coordinate, y coordinate): claim ID\n all_claims = set() # Every single claim\n overlapped = set() # Every single claim that is overlapped\n for line in puzzle_input_arr:\n line_elements = re.split('[#@:,x]', line)\n claim = int(line_elements[1])\n x_coord = int(line_elements[2])\n y_coord = int(line_elements[3])\n width = int(line_elements[4])\n height = int(line_elements[5])\n all_claims.add(claim)\n for i in range(width):\n for j in range(height):\n if (i + x_coord, j + y_coord) not in visited_squares:\n visited_squares[(i + x_coord, j + y_coord)] = claim\n else:\n overlapped.add(visited_squares[(i + x_coord, j + y_coord)])\n overlapped.add(claim)\n return list(all_claims.difference(overlapped))[0]", "def fliplr(img):\n inv_idx = torch.arange(img.size(3) - 1, -1, -1).long() # N x C x H x W\n img_flip = img.index_select(3, inv_idx)\n\n return img_flip", "def find_moveable_pieces(self, die, p1): \n moveable = []\n if (p1):\n #must we re-enter?\n if (self.p1vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p1vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n else:\n #must we re-enter?\n if (self.p2vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? 
ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p2vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n return moveable", "def make_flips(self, move, player, board, direction):\r\n curr = move + direction\r\n opp = self.opponent(player)\r\n while(board[curr]==opp):\r\n board[curr] = player\r\n curr += direction\r\n #return board\r", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def decryptStory():\n \n r=loadWords()\n\n m1=getStoryString()\n \n p=findBestShift(r, m1)\n \n strans=applyShift(m1,p)\n return strans", "def solved(self):\n\n for p in self.pieces:\n if [p.x, p.y, p.z].count(0) < 2: # Filter out centers.\n # Compare each sticker color with its respective center.\n for i, s in enumerate(p.stickers):\n cen = self.getpiece(s.x, s.y, s.z).getsticker(s.x, s.y, s.z)\n if s.c != cen.c:\n return False\n\n return True", "def solve_part2(start):\n inputs = load_inputs(False)\n all_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n if matches[elem]:\n all_matches.append(matches[elem])\n\n # start frmo an aribtrary corner\n # find a match, rotate me so that the match is along the right side\n # fill in properly oriented match\n # repeat, for row = 1+, consider top-match and left-match\n\n # for eery rotations / orientation, look fot the pattern", "def find_flipped_bit(s1, s2):\n if len(s1) == 0 or len(s2) == 0:\n raise ValueError(\"Empty string inputted.\")\n\n if len(s1) != len(s2):\n raise ValueError(\"Strings compared in gray code must have the same length.\")\n \n if any([x != \"0\" and x != \"1\" for x in s1]) or any([x != \"0\" and x != \"1\" for x in s2]):\n raise ValueError(f\"One of inputs {s1}, {s2} is not a valid binary string.\")\n \n # Sum the strings elementwise modulo 2; the sum will be 1 only in the slot \n # where we flipped a bit \n string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))]\n\n if string_sums.count(1) == 0:\n raise ValueError(f\"Strings {s1} and {s2} are the same.\")\n elif string_sums.count(1) > 1:\n raise ValueError(f\"Strings {s1} and {s2} are not ordered in a gray code.\")\n\n return string_sums.index(1)", "def consecutiveFlip(currPath, i):\n j = (i + 1)%len(currPath)\n temp = currPath[i]\n currPath[i] = currPath[j]\n currPath[j] = temp", "def get_mask_puzzle_pieces_background(background: np.ndarray,\n image: np.ndarray) -> list:\n background = cv.cvtColor(background, cv.COLOR_BGR2GRAY)\n image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n image_mask = cv.absdiff(background, image)\n image_mask = cv.medianBlur(image_mask, 11)\n ret, image_mask = cv.threshold(image_mask, 20, 255, cv.THRESH_BINARY)\n image_mask = cv.morphologyEx(image_mask,\n cv.MORPH_OPEN,\n (5, 5),\n iterations=3)\n image_mask = cv.filter2D(image_mask, -1, KERNEL_SHARPENING)\n puzzle_pieces, hiearchy = cv.findContours(image_mask,\n cv.RETR_EXTERNAL,\n cv.CHAIN_APPROX_NONE)\n pieces_mask = []\n for piece in range(len(puzzle_pieces)):\n blank = np.zeros((image_mask.shape[0], image_mask.shape[1]))\n single_piece = cv.drawContours(blank, puzzle_pieces, piece, 255,\n cv.FILLED)\n single_piece = cv.cvtColor(single_piece.astype(np.uint8),\n 
cv.COLOR_GRAY2BGR)\n pieces_mask.append(single_piece)\n return pieces_mask", "def _check_flip(origin_imgs, result_imgs):\n h, w, c = origin_imgs.shape\n for i in range(h):\n for j in range(w):\n for k in range(c):\n if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:\n return False\n return True", "def generate_plays(mask, order_by_mid=False):\n\n position_map = [2 ** i for i in range(49)] # List of a binary representation of individual pieces in the board\n\n available_plays = []\n for column_number in range(7):\n column_values = position_map[7 * column_number: 7 * column_number + 6] # Minus extra cell on top of the board\n for value in column_values:\n if mask & value == 0:\n available_plays.append(value)\n break\n\n if order_by_mid:\n available_plays = [available_plays.pop(i // 2) for i in reversed(range(len(available_plays)))]\n\n return available_plays", "def _get_death_zone_changes(self) -> Tuple[List[Square], List[Square]]:\r\n\r\n eliminated_squares: List[Square] = []\r\n new_corners: List[Square] = []\r\n\r\n original_corners: Dict[str, Square] = self._get_corner_squares()\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_LEFT].pos,\r\n original_corners[Board._TOP_RIGHT].pos,\r\n offset = Pos2D(0, 1)))\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_RIGHT].pos,\r\n original_corners[Board._BOTTOM_RIGHT].pos,\r\n offset = Pos2D(1, 1)))\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._BOTTOM_LEFT].pos,\r\n original_corners[Board._BOTTOM_RIGHT].pos,\r\n offset = Pos2D(0, 1)))\r\n # TODO Consider that this means that top left will be in eliminated_squares TWICE due to the first argument\r\n # to _select_squares always being inclusive.\r\n eliminated_squares.extend(\r\n self._select_squares(original_corners[Board._TOP_LEFT].pos,\r\n original_corners[Board._BOTTOM_LEFT].pos,\r\n offset=Pos2D(1, 0)))\r\n\r\n new_corners.append(\r\n self.squares[original_corners[Board._TOP_LEFT].pos\r\n + Pos2D(1, 1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._BOTTOM_LEFT].pos\r\n + Pos2D(1, -1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._BOTTOM_RIGHT].pos\r\n + Pos2D(-1, -1)])\r\n new_corners.append(\r\n self.squares[original_corners[Board._TOP_RIGHT].pos\r\n + Pos2D(-1, 1)])\r\n\r\n\r\n\r\n return (eliminated_squares, new_corners)", "def swissPairings():\n\n p = playerStandings()\n pair = []\n for x in range(0, len(p), 2):\n y = (p[x][0],p[x][1],p[x+1][0],p[x+1][1])\n pair.append(y)\n return pair", "def findNondegeneratePlacements( self):\n # Rotate counterclockwise by 90 degrees around the v'th vertex.\n r90 = np.array( [ [0,1], [-1,0] ], dtype=int)\n # Flip the piece along the vertical axis through the v'th vertex.\n fv = np.array( [ [1,0], [0,-1] ], dtype=int)\n\n self.placements = []\n uniques = set() # Unique placements generated so far\n identity = np.array( [ [1,0], [0,1] ], dtype=int)\n T = identity[:,:]\n for i in xrange(self.nVertices):\n geo = self.geo[:,:]\n geo -= geo[i] # Place i'th vertex at (0,0)\n for r in xrange(4):\n T = T.dot( r90)\n for f in xrange(2):\n T = T.dot( fv)\n pk = placementKey( geo.dot(T))\n if (not pk in uniques):\n uniques.add( pk)\n self.placements.append( (i, T))\n # After four rotations and two flips, we should be back to\n # the original position.\n assert( np.array_equal( T, identity))\n\n return self.placements", "def contiguousFlip(currPath, i, j):\n if i != j and (i+1)%len(currPath)!=j and 
(j+1)%len(currPath)!=i:\n iP = i\n jP = j\n if (i < j):\n maxx=(j-i+1)//2\n else:\n maxx=(j+1+len(currPath)-i)//2\n for _ in range(maxx):\n temp = currPath[iP]\n currPath[iP] = currPath[jP]\n currPath[jP] = temp\n iP = (iP + 1)%len(currPath)\n jP = (jP - 1)%len(currPath)", "def flankingDirections(self, col, row, playerColor):\n flankingDirections = []\n for direction in self.directions:\n try:\n adjacent = direction(self, col, row)\n if adjacent.color != playerColor and adjacent.color != \"empty\":\n while True:\n colNext = adjacent.col\n rowNext = adjacent.row\n adjacent = direction(self, colNext, rowNext)\n if adjacent.color == playerColor: # successfully flanked opposing piece\n flankingDirections.append(direction)\n break\n if adjacent.color == \"empty\":\n break\n else:\n continue\n except offBoardError:\n continue\n return flankingDirections", "def GetFlippedPoints(paths,array):\n #this may not work for double ups?\n\n for i in paths:\n jprev = i[0]\n for j in i[1:]:\n if abs(j[0] - jprev[0])>1:#top/bottom edge jump\n array[jprev[0]][j[1]][jprev[1]] *=-1\n elif abs(j[2] - jprev[2])>1:#left/right edge jumps\n if (j[2] - jprev[2])<0:#off right edge\n array[j[0]][j[1]][j[2]] *=-1\n elif j[2] - jprev[2]>0:#off left edge\n array[jprev[0]][j[1]][jprev[2]] *=-1\n elif j[0] - jprev[0]==1:#vertical down movement\n array[j[0]][j[1]][j[2]] *=-1\n elif j[0] - jprev[0]==-1:#vertical up movement\n array[jprev[0]][j[1]][jprev[2]] *=-1\n elif j[2] - jprev[2]==1:#right movement\n array[j[0]][j[1]][j[2]] *=-1\n elif j[2] - jprev[2]==-1:#left movement #Edoesnt get called? int/float error?\n array[jprev[0]][j[1]][jprev[2]] *=-1\n jprev=j\n return(array)", "def flip(imgs):\n x = random.choice([-1, 0, 1, 2])\n if x == 2:\n return imgs\n else:\n return [cv2.flip(img, x) for img in imgs]", "def __get_neutral_tiles(self) -> List[List[int]]:\n neutral_tiles = []\n for x in range(self.num_columns):\n for y in range(self.num_rows):\n if self.is_flippable_tile([y, x]):\n neutral_tiles.append([y, x])\n return neutral_tiles", "def matches(hand):\n return list(sorted(match_iter(hand), reverse=True))", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def extensions(self):\n def swap(marker, mx, x2, my, y2):\n \"\"\"\n If proper conditions are met, jump over the peg depending on the\n condition\n @param marker: map, list of list\n @param mx: Original x coordinate\n @param x2: Replacement x coordinate\n @param my: Original y coordinate\n @param y2: Replacement y coordinate\n @return: list[list[str]]\n \"\"\"\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map\n\n def legal_move(marker, x, y, direction):\n \"\"\"\n Checks if there is a potential move at the direction of\".\"\n 
coordinate\n @param marker: map of the board\n @param x: x coordinate\n @param y: y coordinate\n @param direction : North South East West of the \".\"\n @return: boolean\n \"\"\"\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False\n\n combos = []\n # For loops go through the coordinates\n # each if statement checks and appends the new scenario\n # iff there is a legal move available\n for y in range(len(self._marker)):\n for x in range(len(self._marker[0])):\n if self._marker[y][x] == '.':\n if legal_move(self._marker, x, y, 'N'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y-2), self._marker_set))\n if legal_move(self._marker, x, y, 'S'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y+2), self._marker_set))\n if legal_move(self._marker, x, y, 'W'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x-2, y, y), self._marker_set))\n if legal_move(self._marker, x, y, 'E'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x+2, y, y), self._marker_set))\n return combos", "def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unsued \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index]", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def GetFlippedPoints2(paths,blankarray):\n #this may not work for double ups?\n for i in paths:\n jprev = i[0]\n for j in i[1:]:\n if abs(j[0] - jprev[0])>1:#top edge jump\n blankarray[jprev[0]][0][jprev[1]] =-1\n elif abs(j[1] - jprev[1])>1:#left/right edge jumps\n if (j[1] - jprev[1])<0:#off right edge\n blankarray[j[0]][1][j[1]] =-1\n elif j[1] - jprev[1]>0:#off left edge\n blankarray[jprev[0]][1][jprev[1]] =-1\n elif j[0] - jprev[0]==1:#vertical down movement\n blankarray[j[0]][0][j[1]] =-1\n elif j[0] - jprev[0]==-1:#vertical up movement\n blankarray[jprev[0]][0][jprev[1]] =-1\n elif j[1] - jprev[1]==1:#right movement\n blankarray[j[0]][1][j[1]] =-1\n elif j[1] - jprev[1]==-1:#left movement #Edoesnt get called? 
int/float error?\n blankarray[jprev[0]][1][jprev[1]] =-1\n jprev=j\n return(blankarray)", "def _flip_dirs(self, adj_opp_cells):\n lst = []\n for cell in adj_opp_cells:\n lst.append(self._label_flips(cell[0], cell[1], cell[2]))\n\n #print(\"FOR TESTING: lst: \", lst) #FOR TESTING\n lst2 = []\n for e in lst: #lst has elements of the form (boolean, list)\n if e[0] == True:\n lst2.append(e)\n\n if lst2 == []:\n return (False, lst2)\n else:\n lst3 = []\n for e in lst2:\n for t in e[1]:\n lst3.append(t)\n return (True, lst3)", "def flip(self):\n if self.color != \"empty\":\n self.color = self.oppositeColor()\n else:\n raise pieceError(self.col, self.row)", "def traverseBishop(self):\n\t\tmoves = np.empty(14, dtype=object)\n\t\tcnt = [0]\n\t\tPiece.traverse(self, cnt, moves, -1, -1)\n\t\tPiece.traverse(self, cnt, moves, -1, 1)\n\t\tPiece.traverse(self, cnt, moves, 1, -1)\n\t\tPiece.traverse(self, cnt, moves, 1, 1)\n\t\treturn moves[:cnt[0]]", "def testMoreStereo(self):\r\n smi_and_cansmi = [\r\n ('Cl[C@](C)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](C)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](C)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Cl)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(I)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](C)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](C)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](C)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](C)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(I)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Cl)(I)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](Br)(Cl)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](Br)(Cl)I', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Br)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Br)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](C)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](C)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](I)(Br)C', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(Br)C', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@@](I)(Br)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@](I)(Br)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@](Cl)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('I[C@@](Cl)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Cl)(Br)I', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Cl)(Br)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](C)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](C)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('I[C@@](Br)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('I[C@](Br)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](Br)(I)Cl', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](Br)(I)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('C[C@](I)(Cl)Br', 'C[C@](Cl)(Br)I'),\r\n ('C[C@@](I)(Cl)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@@](I)(C)Br', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@](I)(C)Br', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](I)(C)Cl', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](I)(C)Cl', 'C[C@@](Cl)(Br)I'),\r\n ('Cl[C@](Br)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Cl[C@@](Br)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(C)I', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(C)I', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@@](I)(Cl)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@](I)(Cl)C', 'C[C@@](Cl)(Br)I'),\r\n ('Br[C@](Cl)(I)C', 'C[C@](Cl)(Br)I'),\r\n ('Br[C@@](Cl)(I)C', 'C[C@@](Cl)(Br)I')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def swissPairings(t_name):\n rank = playerStandings(t_name)\n pairs = []\n if len(rank) % 2 != 0:\n for i in range(len(rank), 0, -1):\n if played(rank[i-1][0], rank[i-1][0]) == False:\n ele = rank[i-1]\n reportMatch(ele[0], ele[0], ele[0])\n rank.remove(ele)\n break\n for i in range(0, len(rank)/2):\n p1 = rank[0]\n rank.remove(p1)\n for 
player in rank:\n if(played(p1[0], player[0])):\n continue\n p2 = player\n rank.remove(p2)\n break\n pairs.append((p1[0], p1[1], p2[0], p2[1]))\n return pairs", "def get_pieces_left(board, piece):\r\n\tpieces = 0\r\n\tfor row in board:\r\n\t\tfor col in row:\r\n\t\t\tif col == piece:\r\n\t\t\t\tpieces += 1\r\n\r\n\treturn pieces", "def computeSide(self):\n side = 0\n for c in 'abcdefgh':\n side += self.getPieceType(c,1)\n side += self.getPieceType(c,2)\n side -= self.getPieceType(c,7)\n side -= self.getPieceType(c,8) \n rospy.loginfo(\"Computed side value of: %d\" % side)\n if side > 0:\n self.side = self.WHITE # good to go\n else:\n self.side = self.BLACK \n # need to setup board \n temp_board = BoardState(self.side) \n for i in range(8):\n temp_board.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, self.getPiece(7-i, 7)) )\n temp_board.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, self.getPiece(7-i, 2)) )\n\n temp_board.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('h',8)) )\n temp_board.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('g',8)))\n temp_board.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('f',8)))\n temp_board.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, self.getPiece('e',8)))\n temp_board.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, self.getPiece('d',8)))\n temp_board.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('c',8)))\n temp_board.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('b',8)))\n temp_board.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('a',8)))\n\n temp_board.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('h',1)) )\n temp_board.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('g',1)) )\n temp_board.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('f',1)) )\n temp_board.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, self.getPiece('e',1)) )\n temp_board.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, self.getPiece('d',1)) )\n temp_board.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('c',1)) )\n temp_board.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('b',1)) )\n temp_board.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('a',1)) ) \n\n self.values = temp_board.values\n self.printBoard()\n\n self.last_move = \"go\"", "def GetNamesOfPieces(self):\n assert self.RecoveredEnoughPieces()\n result = []\n base = self.fileName + dibs_constants.fileSeparator \n for p in self.piecesRecovered.keys():\n result.append(base + p)\n return result", "def flip(self, x, y):\n self.pieces[x + (y * self.width)].flip()", "def reverse_difference():", "def _flip_adjacent_fluxes(l: Lattice, bonds: np.ndarray, fluxes: np.ndarray):\n for edge_index, (p_a, p_b) in enumerate(l.edges.adjacent_plaquettes):\n if (p_a == INVALID) or (p_b == INVALID):\n break\n if (fluxes[p_a] == -1) and (fluxes[p_b] == -1):\n bonds[edge_index] *= -1\n fluxes[p_a] *= -1\n fluxes[p_b] *= -1\n\n #attempt at vectorising, check this at somepoint\n #adj_fluxes = fluxes[l.edges.adjacent_plaquettes]\n #to_flip = np.where((adj_fluxes[:, 0] == -1) & (adj_fluxes[:, 1] == -1))\n #bonds[to_flip] *= -1\n #fluxes_to_flip = l.edges.adjacent_plaquettes[to_flip].flatten()\n #fluxes[fluxes_to_flip] *= -1\n\n return bonds, fluxes", "def footprint_corner_indices():", "def predictSideFromFEN(fen):\n\n # remove 
spaces values (numbers) from fen\n fen = re.sub('\\d','',fen)\n \n #split fen to top half and bottom half (top half first)\n parts = fen.split('/')\n top = list(''.join(parts[:4]))\n bottom = list(''.join(parts[4:]))\n \n # If screenshot is aligned from POV of white to play, we'd expect\n # top to be mostly black pieces (lowercase)\n # and bottom to be mostly white pieces (uppercase), so lets count\n top_count_white = sum(list(map(lambda x: ord(x) <= ord('Z'), top)))\n bottom_count_white = sum(list(map(lambda x: ord(x) <= ord('Z'), bottom)))\n\n top_count_black = sum(list(map(lambda x: ord(x) >= ord('a'), top)))\n bottom_count_black = sum(list(map(lambda x: ord(x) >= ord('a'), bottom)))\n\n # If more white pieces on top side, or more black pieces on bottom side, black to play\n if (top_count_white > bottom_count_white or top_count_black < bottom_count_black):\n return 'b'\n\n # Otherwise white\n return 'w'", "def vflip(self):\n for y in range(0, self.height // 2):\n for x in range(0, self.width):\n self._chars[x][y], self._chars[x][self.height - 1 - y] = self._chars[x][self.height - 1 - y], self._chars[x][y]\n self._fginfo[x][y], self._fginfo[x][self.height - 1 - y] = self._fginfo[x][self.height - 1 - y], self._fginfo[x][y]\n self._bginfo[x][y], self._bginfo[x][self.height - 1 - y] = self._bginfo[x][self.height - 1 - y], self._bginfo[x][y]\n self._strDirty = True", "def _flip(self,update_stack,index):\n cell=game.get_cell(index)\n if cell.ifFlipped()==False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine()==False and cell.get_neighbor()>0:\n update_stack[str(index)]=cell.get_neighbor()\n return\n elif cell.isMine()==False and cell.get_neighbor()==0:\n update_stack[str(index)]=cell.get_neighbor()\n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n self._flip(update_stack,s[0]*self._col_num+s[1])", "def heuristic_takeAllPiece(board, player):\n\n if player is board._WHITE:\n return board._nbWHITE - board._nbBLACK\n \n return board._nbBLACK - board._nbWHITE", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def flip(self):", "def get_played_positions(board):\n return np.argwhere(board.state != -1)", "def circnot(a):\n circnot= []\n length=0\n z = [1]\n circle=True\n i=0\n while circle:\n if a[i] not in z:\n z.append(a[i])\n i=a[i]-1\n else :\n circle=False\n newlength = len(z)\n 
#print(\"(%s)\"%(z[length:newlength]))\n circnot.append(tuple(z[length:newlength]))\n print(tuple(z[length:newlength]))\n\n length = len(z)\n\n for idx,item in enumerate(a):\n if item not in z:\n circle = True\n i = idx\n break\n\n return circnot", "def _flip_isolated_fluxes(l: Lattice, bonds: np.ndarray, fluxes: np.ndarray):\n indices_to_flip = np.where(fluxes == -1)[0]\n\n def pos(p):\n return l.plaquettes[p].center\n\n def distance_func(a, b):\n return straight_line_length(pos(a), pos(b))\n\n close_pairs = _greedy_plaquette_pairing(indices_to_flip, distance_func)\n\n for a, b in close_pairs:\n plaquettes, edges_to_flip = path_between_plaquettes(l,\n a,\n b,\n maxits=l.n_edges)\n bonds[edges_to_flip] *= -1\n fluxes[a] *= -1\n fluxes[b] *= -1\n\n return bonds, fluxes", "def decryptStory():\n return applyShift(getStoryString(), findBestShift(loadWords(), getStoryString()))", "def pieces(relatorlist):\n F,rels=fg.parseinputwords(relatorlist)\n if not all(r==F.cyclic_reduce(r) for r in rels):\n raise ValueError(\"Relators are not cyclically reduced.\")\n pieces=set()\n irels=[rel for rel in itertools.chain.from_iterable(zip([w() for w in rels],[(w**(-1))() for w in rels]))] # arrange relators and inverses in a list of the form relator1, inverse of relator1, relator2, inverse of relator2,...\n drels=[x+x for x in irels]\n for relatorindex in range(len(rels)): # only need to search relators for candidate pieces, since a piece contained in inverse will be inverse of piece contained in relator\n relator=irels[2*relatorindex]\n for L in range(1,1+len(relator)):\n for startingindex in range(len(relator)):\n p=(relator+relator)[startingindex:startingindex+L] # the subword of length L starting at index i in reltaor as a cyclic word\n # now we need to check if p is a piece\n # we do not need to check lower relatorindices, because we already scanned those relators for pieces\n if any(p in x for x in [(relator+relator)[startingindex+1:len(relator)+startingindex+L-1]]+[drels[i] for i in range(2*relatorindex+1,len(drels))]):# found a matching subword, p is a piece\n pieces.add(p)\n pieces.add(''.join(reversed(p.swapcase())))\n return pieces", "def MoveBishop(ChessPiece, NewSquare, AllPieces, Board):\n\n\tfor Bishop in AllPieces[ChessPiece]:\n\t\tX_BishopSquare = int(Bishop['CurrentSquare'][1])-1 # Integer Coordinate\n\t\tY_BishopSquare = int(RowToNumber(Bishop['CurrentSquare'][0])) # Integer Coordinate\n\n\t\t# Diagonal Up Right..\n\t\tfor i in range(1, 8):\n\t\t\tif X_BishopSquare+i <= 7 and Y_BishopSquare+i <= 7:\n\t\t\t\tif NewSquare == (NumberToRow(str(Y_BishopSquare+i))+str(X_BishopSquare+i+1)):\n\t\t\t\t\tif Board[X_BishopSquare+i][Y_BishopSquare+i] is not ChessPiece[:5]:\n\t\t\t\t\t\tBoard[X_BishopSquare][Y_BishopSquare] = 'Free'\n\t\t\t\t\t\tBoard[X_BishopSquare+i][Y_BishopSquare+i] = ChessPiece[:5]\n\t\t\t\t\t\treturn Bishop['CurrentSquare']\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t# Diagonal Up Left..\n\t\tfor i in range(1, 8): \n\t\t\tif X_BishopSquare+i <= 7 and Y_BishopSquare-i >= 0:\n\t\t\t\tif NewSquare == (NumberToRow(str(Y_BishopSquare-i))+str(X_BishopSquare+i+1)):\n\t\t\t\t\tif Board[X_BishopSquare+i][Y_BishopSquare-i] is not ChessPiece[:5]:\n\t\t\t\t\t\tBoard[X_BishopSquare][Y_BishopSquare] = 'Free'\n\t\t\t\t\t\tBoard[X_BishopSquare+i][Y_BishopSquare-i] = ChessPiece[:5]\n\t\t\t\t\t\treturn Bishop['CurrentSquare']\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\t# Diagonal Down Right..\n\t\tfor i in range(1, 8): \n\t\t\tif X_BishopSquare-i >= 0 and Y_BishopSquare+i <= 7:\n\t\t\t\tif NewSquare == 
(NumberToRow(str(Y_BishopSquare+i))+str(X_BishopSquare-i+1)):\n\t\t\t\t\tif Board[X_BishopSquare-i][Y_BishopSquare+i] is not ChessPiece[:5]:\n\t\t\t\t\t\tBoard[X_BishopSquare][Y_BishopSquare] = 'Free'\n\t\t\t\t\t\tBoard[X_BishopSquare-i][Y_BishopSquare+i] = ChessPiece[:5]\n\t\t\t\t\t\treturn Bishop['CurrentSquare']\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\n\t\t# Diagonal Down Left..\n\t\tfor i in range(1, 8):\n\t\t\tif X_BishopSquare-i >= 0 and Y_BishopSquare-i >= 0:\n\t\t\t\tif NewSquare == (NumberToRow(str(Y_BishopSquare-i))+str(X_BishopSquare-i+1)):\n\t\t\t\t\tif Board[X_BishopSquare-i][Y_BishopSquare-i] is not ChessPiece[:5]:\n\t\t\t\t\t\tBoard[X_BishopSquare][Y_BishopSquare] = 'Free'\n\t\t\t\t\t\tBoard[X_BishopSquare-i][Y_BishopSquare-i] = ChessPiece[:5]\n\t\t\t\t\t\treturn Bishop['CurrentSquare']\n\t\t\telse:\n\t\t\t\tbreak\n\n\t# If Both Bishops Don't Match..\n\treturn None", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def checkPercolation(pcs):\n o = Options()\n crossings = [[i] for i in range(len(pcs))]\n for i in range(len(pcs)):\n for j in range(i + 1, len(pcs)):\n pc1 = pcs[i]\n pc2 = pcs[j]\n if disksInTheShellCross(pc1, pc2):\n print(i, j)\n crossings[i].append(j)\n crossings[j].append(i)\n for j in range(len(crossings)):\n for k in range(len(crossings[j])):\n if k == 0:\n continue\n else:\n for element in crossings[j]:\n if element not in crossings[crossings[j][k]]:\n crossings[crossings[j][k]].append(element)\n toPop = []\n for i in range(len(crossings)):\n if len(crossings[i]) == 1:\n if not i in toPop:\n toPop.append(i)\n for i in toPop[::-1]:\n crossings.pop(i)\n for i, crossing in enumerate(crossings):\n crossings[i] = set(crossing)\n toPop = []\n for i in range(len(crossings)):\n for j in range(i + 1, len(crossings)):\n if crossings[i] - crossings[j] == set():\n if not i in toPop:\n toPop.append(i)\n print(toPop)\n for i in toPop[::-1]:\n crossings.pop(i)\n \n pprint(crossings)\n \n names = []\n for j, crossing in enumerate(crossings):\n names.append([])\n for i in crossing:\n names[j].append(pcs[i].number())\n for i in range(len(pcs)):\n for namesString in names:\n string1 = str(i)\n for j in range(27):\n string2 = '0' * j + string1\n if string1 in namesString and string2 in namesString:\n print(percolation)\n# names = []\n# for i in crossings:\n# for j in i:\n# print(int(pcs[j].number()), end=' ')\n# print()\n return None", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first 
player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def make_flips(move, player, board, direction):\n bracket = Othello.find_bracket(move, player, board, direction)\n if not bracket:\n return\n square = move + direction\n while square != bracket:\n board[square] = player\n square += direction", "def shift(shifts):\r\n f = open(\"bytes.txt\", \"r\")\r\n png = f.readline().split()\r\n f.close()\r\n fixed = [None]*len(png)\r\n for i in range(16):\r\n for j in range(len(png)//16):\r\n shift = shifts[i]\r\n fixed[j*16+i] = png[(((j+shift)*16)%len(png))+i]\r\n #print(\" \".join(fixed))\r\n return fixed", "def extensions(self):\n y = 0\n\n # set y value to row of empty space\n while '*' not in self.from_grid[y]:\n y 
+= 1\n x = self.from_grid[y].index('*')\n\n lst = []\n\n # shift piece down, empty space goes up\n if y > 0:\n lst.append(MNPuzzle(swap_up(self.from_grid, y, x),\n self.to_grid))\n\n # shift piece up, empty space goes down\n if y < len(self.from_grid) - 1:\n lst.append(MNPuzzle(swap_down(self.from_grid, y, x),\n self.to_grid))\n\n # shift piece left, empty space goes right\n if x < len(self.from_grid[0]) - 1:\n lst.append(MNPuzzle(swap_right(self.from_grid, y, x),\n self.to_grid))\n\n # shift piece right, empty space goes left\n if x > 0:\n lst.append(MNPuzzle(swap_left(self.from_grid, y, x),\n self.to_grid))\n\n return lst", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def flipper(deck, message): #flips card in player hand\r\n\tflipcheck, flipcheck1 = 1, 0\r\n\ttempHand = []\r\n\r\n\tprint message,\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.34);print \".\"\r\n\r\n\ttry:\r\n\t\twhile flipcheck == 1:\r\n\t\t\ttry:\r\n\t\t\t\ttempHand = random.choice(deck) #grab card from player/cpu hand\r\n\t\t\t\tflipcheck = 0\r\n\r\n\t\t\texcept(TypeError):\r\n\t\t\t\tflipcheck1 += 1\r\n\r\n\t\t\t\tif flipcheck1 == 5:\r\n\t\t\t\t\tsys.exit(TypeError)\r\n\r\n\t\tif tempHand in deck:\r\n\t\t\tdeck.remove(tempHand) #removes tempHand from player/cpu hand\r\n\r\n\texcept(IndexError):\r\n\t\tpass\r\n\r\n\tif type(tempHand) == list:\r\n\t\tprint \"The card was a \" + str(tempHand[1]) + \" 
of \" + str(tempHand[0]) + \"!\\n\"\r\n\r\n\telse:\r\n\t\tprint \"The card was the \" + tempHand + \" wild card!\"\r\n\r\n\t\tif tempHand == 'MasterSpark': #MasterSpark Wild Card\r\n\t\t\tif deck == playerDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tplayerDisplayed.remove('MasterSpark')\r\n\t\t\telif deck == cpuDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tcpuDisplayed.remove('MasterSpark')\r\n\r\n\treturn [tempHand, deck] #returns two values. use arrays to get correct values with tempGrab[]\r", "def traverseRook(self):\n\t\tmoves = np.empty(14, dtype=object)\n\t\tcnt = [0]\n\t\tPiece.traverse(self, cnt, moves, -1, 0)\n\t\tPiece.traverse(self, cnt, moves, 1, 0)\n\t\tPiece.traverse(self, cnt, moves, 0, -1)\n\t\tPiece.traverse(self, cnt, moves, 0, 1)\n\t\treturn moves[:cnt[0]]", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def whack(initial_moles):\n return search(board_from_moles(initial_moles))", "def heralded_fock_basis(self, detector_pattern):\n undetected_photons = self.photon_number - sum(detector_pattern)\n undetected_modes = set(range(self.N)) - self.circuit.detected_modes\n\n #write down the detector outcome in terms of which modes the photons arrived \n detector_outcome = []\n for mode, occupancy in zip(self.circuit.detected_modes, detector_pattern):\n detector_outcome.extend([mode] * occupancy)\n\n if undetected_photons > 0:\n #look at all options for where undetected photons could be\n undetected_outcomes = combinations_with_replacement(undetected_modes, undetected_photons)\n\n #combine detected and undetected outcomes\n return (tuple(sorted(detector_outcome + list(u))) for u in undetected_outcomes)\n else:\n return (tuple(detector_outcome),)", "def decryptStory():\n wordList = loadWords()\n text = getStoryString() \n k = findBestShift(wordList, text)\n \n return applyShift(text, k)", "def flip(pancakes):\r\n res = ''\r\n for pancake in pancakes:\r\n if pancake == '+':\r\n res += '-'\r\n else:\r\n res += '+'\r\n return res", "def swissPairings():\n standings = playerStandings()\n match_list = []\n\n # Looks at indices in standings with even numbers and pairs them with\n # adjacent players (i.e. 
players with the most similar standing)\n for x in range(0, len(standings)/2):\n new_match = (standings[2 * x][0], standings[2 * x][1],\n standings[2 * x + 1][0], standings[2 * x + 1][1])\n match_list.append(new_match)\n return match_list", "def _warp_dir(intuple):\n pe = intuple[1][\"PhaseEncodingDirection\"][0]\n return 2 * [[int(pe == ax) for ax in \"ijk\"]]", "def decode_heads_or_tails(sheet):\r\n\r\n all_rows = [[5, 6, 7, 8], [3, 4, 7, 8], [2, 4, 6, 8]] # navigates rows with a 1 in place 6, place 5, place 4\r\n all_columns = [[5, 6, 7, 8], [3, 4, 7, 8], [2, 4, 6, 8]] # navigates through columns\r\n\r\n sheet_state = []\r\n\r\n for rows in all_rows: # navigates rows to count 1's\r\n ones_count = 0\r\n for row in rows:\r\n for column in range(1, 9):\r\n if sheet.cell(row=row, column=column).value == 1:\r\n ones_count += 1\r\n\r\n if ones_count % 2 == 0: # if even\r\n digit = 0\r\n else: # if odd\r\n digit = 1\r\n\r\n sheet_state.append(digit) # builds binary number backwards \r\n\r\n for columns in all_columns: # navigates columns to count 1's\r\n ones_count = 0\r\n for column in columns:\r\n for row in range(1, 9):\r\n if sheet.cell(row=row, column=column).value == 1:\r\n ones_count += 1\r\n\r\n if ones_count % 2 == 0: # if even\r\n digit = 0\r\n else: # if odd\r\n digit = 1\r\n\r\n sheet_state.append(digit) # continues to build binary number backwards\r\n\r\n print('The current state of the decoded board is:\\n{}'.format(sheet_state))\r\n\r\n return sheet_state # binary number as a list of 0's and 1's as ints\r", "def de_flip(n):\n start = '0'*(n//2) + '1' + '0'*(n//2)\n L = [[start]]\n L_tmp = []\n collect = []\n\n \n print(L)\n\n length = 1\n \n while(True):\n flag_cycle = -1\n count = 0\n for next_list in L:\n next_0 = next_list[-1][1:] + '0'\n next_1 = next_list[-1][1:] + '1'\n # 마지막 문자에서 0과 1을 추가한다.\n\n # 사이클완성!\n if next_0 == start:\n collect.append(next_list)\n flag_cycle = len(next_list)\n count += 1\n continue\n\n # 리스트에서 플립핑 검사\n if next_0 not in next_list and next_0[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_0)\n L_tmp.append(copy)\n \n if next_1 not in next_list and next_1[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_1)\n L_tmp.append(copy)\n \n if len(L_tmp) == 0:\n break\n L = L_tmp.copy()\n L_tmp = []\n\n print(\"length : {0}, flag_cycle : {1}, count : {2}\".format(length,flag_cycle,count))\n length += 1\n \n return collect", "def parse_alg_to_slice_moves(self, alg):\n temp_cube = Cube()\n alg_list = alg.split()\n rev_alg = reverse_alg(alg)\n final_alg = []\n temp_cube.solve_helper = alg\n center = temp_cube.current_perm(5)\n while alg_list:\n slice_move = None\n if len(alg_list) > 1:\n slice_move = temp_cube.check_slice(alg_list[0], alg_list[1])\n if slice_move:\n for m in slice_move.split():\n final_alg.append(m)\n alg_list.pop(0)\n else:\n final_alg.append(alg_list[0])\n alg_list.pop(0)\n alg_apply_rot = temp_cube.parse_rotation_from_alg(final_alg)\n final = []\n final_alg_str = \" \".join(alg_apply_rot)\n if final_alg_str.count('E') == 4:\n found = 0\n for i in range(len(alg_apply_rot)):\n if alg_apply_rot[i] == 'E' or alg_apply_rot[i] == \"E'\":\n found += 1\n if found == 1 or found == 4:\n if alg_apply_rot[i] == 'E':\n final.append(\"U\")\n final.append(\"D'\")\n final.append(\"y'\")\n if alg_apply_rot[i] == \"E'\":\n final.append(\"U'\")\n final.append(\"D\")\n final.append(\"y\")\n else:\n final.append(alg_apply_rot[i])\n else:\n final.append(alg_apply_rot[i])\n\n\n\n final_alg_str =\" 
\".join(temp_cube.parse_rotation_from_alg(final))\n check_orientation_cube = Cube()\n check_orientation_cube.solve = final_alg_str\n check_orientation_cube.currently_parsing_smart_cube = True\n\n fix = check_orientation_cube.fix_rotation()\n final_alg_str += \" \" + \" \".join(fix)\n return final_alg_str", "async def simulate_odd_draw(teams):\n half_len = int((len(teams)+1)/2)\n arr1 = [i for i in range(half_len)]\n arr2 = [i for i in range(half_len, len(teams)+1)][::-1]\n matches = []\n for i in range(len(teams)):\n arr1.insert(1, arr2.pop(0))\n arr2.append(arr1.pop())\n for a, b in zip(arr1, arr2):\n if len(teams) not in (a, b):\n matches.append((teams[a], teams[b]))\n return matches", "def flip_sides(self) -> 'Piece':\n return Piece(\n str(self.rank),\n self.color.other_color,\n promoted=self.is_promoted\n )", "def correct_boxes(boxes):\n\n return [(b[0], b[2], b[1], b[3]) for b in boxes]", "def get_banned_moves(g, level):\n result = []\n size = len(g)\n\n for i in range(len(g)):\n for j in range(len(g[i])):\n if level == 0:\n break\n result.append((i, j))\n level -= 1\n\n # If we have to move the last element of the row in place then dont ban the move\n # in the top left or it will be impossible\n if len(result) % size == size - 1:\n result.pop()\n\n return result", "def de_flip2(n = 3,start = None):\n if start == None:\n start = '0'*(n//2) + '1' + '0'*(n//2)\n else:\n n = len(start)\n L = [[start]]\n L_tmp = []\n collect = []\n\n \n print(L)\n\n length = 1\n \n while(True):\n flag_cycle = -1\n count = 0\n for next_list in L:\n next_0 = next_list[-1][1:] + '0'\n next_1 = next_list[-1][1:] + '1'\n # 마지막 문자에서 0과 1을 추가한다.\n\n # 사이클완성!\n if next_0 == start:\n collect.append(next_list)\n flag_cycle = len(next_list)\n count += 1\n continue\n\n # 리스트에서 플립핑 검사\n if next_0 not in next_list and next_0[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_0)\n L_tmp.append(copy)\n \n if next_1 not in next_list and next_1[::-1] not in next_list:\n copy = next_list.copy()\n copy.append(next_1)\n L_tmp.append(copy)\n \n if len(L_tmp) == 0:\n break\n L = L_tmp.copy()\n L_tmp = []\n\n print(\"length : {0}, flag_cycle : {1}, count : {2}\".format(length,flag_cycle,count))\n length += 1\n \n return collect", "def do_bitflip(zshots, p01, p10):\n zshots = zshots.astype(int)\n r = np.random.rand(*zshots.shape)\n f01 = r<p01\n f10 = r<p10\n return (zshots==1)* (f01*np.logical_not(zshots) + (1-f01)*zshots) + (zshots==0) * (f10 * np.logical_not(zshots) + (1-f10) * zshots)", "def pieces(self):\n return (self.leaf_nodes.order_by('piece')\n .values_list('piece', flat=True))", "def manacher(string):\n\tif not string:\n\t\treturn []\n\tright = 0\n\tcenter = 0\n\tstring = interleave(string)\n\tdps = [0] * len(string)\n\t\n\tfor i in range(1, len(string)):\n\t\tmirror = 2*center - i\n\t\tif i + dps[mirror] < right:\n\t\t\tdps[i] = dps[mirror]\n\t\telse:\n\t\t\tcenter = i\n\t\t\tmirror = 2 * center - right - 1\n\t\t\tridx = right + 1\n\t\t\t# print (i, center, right, mirror)\n\t\t\twhile ridx < len(string):\n\t\t\t\tif mirror >= 0 and string[mirror] == string[ridx]:\n\t\t\t\t\tmirror -= 1\n\t\t\t\t\tridx += 1\n\t\t\t\telse :\n\t\t\t\t\tbreak\n\t\t\t# print (i, center, ridx, mirror)\n\t\t\tright = ridx - 1\n\t\t\tdps[i] = right - i\n\n\t# print (string)\n\treturn dps", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go 
up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def _get_piece_moves(self, x, y):\n\n piece = self.get_piece(x, y)\n moves = []\n\n if not piece:\n return moves\n\n if piece.name == 'rook' or piece.name == 'queen':\n direcs = ['up', 'down', 'left', 'right']\n moves = [self._get_moves_indirection(x, y, direc) for direc in\n direcs]\n\n elif piece.name == 'bishop' or piece.name == 'queen':\n direcs = ['d1', 'd2', 'd3', 'd4']\n for direc in direcs:\n moves += self._get_moves_indirection(x, y, direc)\n\n elif piece.name == 'king':\n moves = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1),\n (x, y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]\n\n elif piece.name == 'knight':\n moves = [(x-1, y-2), (x-2, y-1), (x-2, y+1), (x-1, y+2),\n (x+1, y+2), (x+2, y+1), (x+1, y-2), (x+2, y-1)]\n\n elif piece.name == 'pawn':\n if piece.color == ChessGame.BLACK:\n moves = [(x-1, y), (x-1, y-1), (x-1, y+1)]\n else:\n moves = [(x+1, y), (x+1, y-1), (x+1, y+1)]\n\n tmp = list(moves)\n for u, v in tmp:\n if v != y and not self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n if v == y and self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n mycolor = piece.color\n valid = set()\n for (u, v) in moves:\n if not self.in_bounds(u, v):\n continue\n\n if not self.get_piece(u, v): # board is blank\n valid.add((u, v))\n\n if self.is_enemy(u, v, mycolor):\n valid.add((u, v))\n\n return valid", "def winner(board):\n\n for sign in [X, O]:\n for i in range(3):\n if board[0][i] == sign and board[1][i] == sign and board[2][i] == sign:\n return sign\n elif board[i][0] == sign and board[i][1] == sign and board[i][2] == sign:\n return sign\n\n if board[0][0] == sign and board[1][1] == sign and board[2][2] == sign:\n return sign\n elif board[2][0] == sign and board[1][1] == sign and board[0][2] == sign:\n return sign\n\n return None", "def get_moves(self, pgn_data):\n result = []\n\n try:\n exp = '(?P<num>\\d+)\\.(?P<white>\\w+) (?P<black>[\\d|\\w|-]+)'\n result = re.findall(exp, pgn_data)\n\n if result[-1][2] == '1-0' or \\\n result[-1][2] == '0-1' or \\\n result[-1][2] == '1/2-1/2':\n last_item = result[-1]\n del result[-1]\n result.append((last_item[0], last_item[1], ''))\n except:\n pass\n\n return result", "def decrypt_fable():\n ### TODO.\n fable_string = get_fable_string()\n print \"fable_string = \", fable_string\n fable_best_shifts = find_best_shifts(wordlist, fable_string)\n print \"fable_best_shifts =\", fable_best_shifts\n fable_decoded = apply_shifts(fable_string, fable_best_shifts)\n return fable_decoded", "def swissPairings(matchid):\n standings = playerStandings(matchid)\n matchup = []\n \"\"\"\n # if statement pops out player who hasnt had a bye yet\n # bye chosen from middle ranked 
players\n if len(standings)%2 != 0:\n bye_place = len(standings)/2\n while (standings[bye_place][6] == 1):\n bye_place = bye_place + 1\n bye = standings.pop(bye_place)\n #build match-up system\n for num in range(0, len(standings)):\n if num%2 == 0:\n matches.append(num)\n \"\"\"\n while (len(standings) != 0):\n to_match = standings.pop(0)\n next_player = 0\n print \"to match\"\n while (to_match[0] in standings[next_player][5]):\n print \"next player\"\n next_player = next_player + 1\n matched = standings.pop(next_player)\n matchup.append((to_match[0], to_match[1],\n matched[0], matched[1]))\n if (len(standings) == 1):\n bye = standings.pop(0)\n matchup.append((bye[0],bye[1]))\n return matchup", "def flip_cards(deck, num=1):\n remaining = deck[:]\n random.shuffle(remaining)\n flipped = []\n for i in range(num):\n flipped.append(remaining.pop())\n return flipped, remaining", "def getparts(image, block_len):\n img = image.convert('L') if image.mode != 'L' else image\n w, h = img.size \n parts = []\n # Bluring image for abandoning image details and noise.\n global opt\n for n in range(int(opt.imblev)):\n img = img.filter(ImageFilter.SMOOTH_MORE)\n # Converting image to custom palette\n imagetopalette(img, [x for x in range(256) if x%int(opt.impalred) == 0])\n pix = img.load()\n \n for x in range(w-block_len):\n for y in range(h-block_len):\n data = list(blockpoints(pix, (x,y), block_len)) + [(x,y)]\n parts.append(data)\n parts = sorted(parts)\n return parts", "def getPossibleDropsOutCheck(player, board, playerCaptures):\n listDropOutCheck = []\n for piece in playerCaptures:\n #Get all available places to drop the piece\n listDropPiece = piece.availableDrops(board)\n if type(listDropPiece) == list:\n for item in listDropPiece:\n #Copy the board, piece and see if doing the drop would result in a board that is NOT in check\n copyBoard = copy.deepcopy(board)\n newPiece = copy.deepcopy(piece)\n newPiece.posx = item[0]\n newPiece.posy = item[1]\n copyBoard[item[0]][item[1]] = newPiece\n if not isInCheck(player, copyBoard):\n listDropOutCheck.append((piece.name, item))\n \n return listDropOutCheck" ]
[ "0.6014114", "0.58825684", "0.56461775", "0.556043", "0.5503755", "0.54604626", "0.5395813", "0.53947204", "0.5389461", "0.53884953", "0.53873485", "0.5317875", "0.52953404", "0.5292987", "0.52406585", "0.52359927", "0.5230201", "0.5228546", "0.52233607", "0.5223231", "0.52129936", "0.52057695", "0.5203761", "0.5179851", "0.51633936", "0.51629853", "0.515634", "0.51563215", "0.51540995", "0.5138738", "0.5119869", "0.5119711", "0.5116559", "0.51084715", "0.5100702", "0.5095929", "0.5086154", "0.5073087", "0.5071748", "0.5060047", "0.50583076", "0.5042421", "0.50251067", "0.50147444", "0.50112087", "0.5010524", "0.50070155", "0.5005575", "0.49913266", "0.49808443", "0.4977136", "0.49732754", "0.49669307", "0.49658254", "0.49658138", "0.49639148", "0.49579054", "0.49547732", "0.49488702", "0.4945867", "0.49440885", "0.49333328", "0.49331564", "0.49287605", "0.49260274", "0.49231935", "0.49217921", "0.49159506", "0.49083465", "0.49071977", "0.49026874", "0.4897258", "0.48971698", "0.48946497", "0.48944", "0.4893781", "0.4893454", "0.48928913", "0.48856378", "0.4883541", "0.4879444", "0.48789033", "0.48705876", "0.48586372", "0.48558336", "0.48530433", "0.48511404", "0.48443905", "0.48387173", "0.48301712", "0.48244485", "0.48241726", "0.4821017", "0.48207772", "0.48179692", "0.4812873", "0.4811764", "0.4810772", "0.48042727", "0.4796562" ]
0.64342684
0
This function tests a board for a winner and returns a value between 0 and 5
def test_winner(state_board):
    res = 3  #default value is tie game
    ptsb = 0  #points for the black
    ptsw = 0  #points for the white
    #looks in the board if there is an empty case while
    # counting the number of points for each player
    for i in state_board:
        for j in i:
            if j == 0:
                res = 0
            elif j == 1:
                ptsb += 1
            elif j == 2:
                ptsw += 1
    #if there is an empty case, looks for possibilities
    # for the other player, if no possibility test for the points
    #if no empty case
    # test for points
    #else return 0
    if res == 0:
        if possible(state_board,1) == []:
            if possible(state_board,2) == []:
                res = count_points(ptsb,ptsw)
            else:
                res = 5
        elif possible(state_board,2) == []:
            res = 4
    else:
        res = count_points(ptsb,ptsw)
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(board):\n winner = 0\n for player in [1, 2]:\n if row_win(board, player) or col_win(board, player) or diag_win(board, player):\n winner = player\n \n if np.all(board != 0) and winner == 0:\n winner = -1\n return winner", "def utility(board):\n win = winner(board)\n if win == X: return 1\n elif win == O: return - 1\n else: return 0", "def get_winner(board):\n\n def who_won(in_a_row, board_size, cur_player):\n \"\"\" \n a function private to get_winner() (yes you can do this. Cool huh!?) \n that tells get_winner if it has a winner \n \"\"\"\n if in_a_row == board_size:\n return 1 if cur_player == 'X' else 2\n else:\n return 0\n\n def test_row_col(board, rows):\n \"\"\" private function to test the rows and columns \"\"\"\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n def test_diagonal(board, normal):\n \"\"\" private function to test the two diagonals \"\"\"\n cur_player = board[0][0] if normal else board[0][len(board)-1]\n in_a_row = 0\n for i in range(len(board)):\n symbol = board[i][i] if normal else board[i][len(board)-1-i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1 \n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n\n # test rows\n winner = test_row_col(board, True)\n if not winner == 0:\n return winner\n\n # test cols\n winner = test_row_col(board, False)\n if not winner == 0:\n return winner\n\n # test diagonal from top left to bottom right\n winner = test_diagonal(board, True)\n if not winner == 0:\n return winner\n\n # test diagonal from top right to bottom left\n winner = test_diagonal(board, False)\n if not winner == 0:\n return winner\n\n return 0", "def winner(board):\n #To determine the winner, I need to know the board's final value. \n token_value = utility(board)\n #if it's 1, X won. If it's -1, O won. 
Else, it was a tie.\n if(token_value == 1):\n return 'X'\n elif(token_value == -1):\n return 'O'\n else:\n return None", "def utility(board):\n winners = winner(board)\n if (X == winners):\n return 1\n elif (O == winners):\n return -1\n return 0", "def utility(board):\n final = winner(board)\n if final == X:\n return 1\n elif final == O:\n return -1\n else:\n return 0", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > white_count:\n #if black_count + white_count != 64:\n # black_count += (64 - black_count - white_count)\n return (-1, black_count, white_count)\n elif white_count > black_count:\n #if black_count + white_count != 64:\n # white_count += (64 - black_count - white_count)\n return (1, black_count, white_count)\n else:\n return (0, black_count, white_count)", "def utility(board):\n won = winner(board)\n\n if won == X:\n return 1\n elif won == O:\n return -1\n else:\n return 0", "def utility(board):\n status = winner(board)\n if status == X:\n return 1\n elif status == O:\n return -1\n else:\n return 0", "def utility(board):\n value = 0\n if winner(board) == X:\n value = 1\n elif winner(board) == O:\n value = -1\n else:\n value = 0\n return value", "def winner(board):\n # Hard code winning moves\n # row0\n if board[0][0] == board[0][1] == board[0][2] == X:\n return X\n elif board[0][0] == board[0][1] == board[0][2] == O:\n return O\n # row1\n elif board[1][0] == board[1][1] == board[1][2] == X:\n return X\n elif board[1][0] == board[1][1] == board[1][2] == O:\n return O\n # row2\n elif board[2][0] == board[2][1] == board[2][2] == X:\n return X\n elif board[2][0] == board[2][1] == board[2][2] == O:\n return O\n # col0\n elif board[0][0] == board[1][0] == board[2][0] == X:\n return X\n elif board[0][0] == board[1][0] == board[2][0] == O:\n return O\n # col1\n elif board[0][1] == board[1][1] == board[2][1] == X:\n return X\n elif board[0][1] == board[1][1] == board[2][1] == O:\n return O\n # col2\n elif board[0][2] == board[1][2] == board[2][2] == X:\n return X\n elif board[0][2] == board[1][2] == board[2][2] == O:\n return O\n # diagonal\n elif board[0][0] == board[1][1] == board[2][2] == X:\n return X\n elif board[0][0] == board[1][1] == board[2][2] == O:\n return O\n # inverse diagonal\n elif board[0][2] == board[1][1] == board[2][0] == X:\n return X\n elif board[0][2] == board[1][1] == board[2][0] == O:\n return O\n\n return None", "def utility(board):\n\n # Determine winner\n victor = winner(board)\n\n # Assign proper values accordingly\n if victor == X:\n return 1\n elif victor == O:\n return -1\n else:\n return 0", "def win(board):\n # COLUMNS\n if board[7] == board[4] == board[1] != ' ': # first column down\n return board[7]\n elif board[8] == board[5] == board[2] != ' ': # second column down\n return board[8]\n elif board[9] == board[6] == board[3] != ' ': # third column down\n return board[9]\n\n # ROWS\n if board[7] == board[8] == board[9] != ' ': # first row across\n return board[7]\n elif board[4] == board[5] == board[6] != ' ': # second row across\n return board[4]\n elif board[1] == board[2] == board[3] != ' ': # third row across\n return board[1]\n\n # DIAGONALS\n if board[7] == board[5] == board[3] != ' ': # diagonal staring on left\n return board[7]\n elif board[9] == board[5] == board[1] != ' ': # diagonal starting on right\n return board[9]\n\n # else no winner\n return \"No\"", "def winner(self):\n\n\t\tfor player in [1,2]:\n\t\t\twon = np.full((self.boardSize), player)\n\n\t\t\t# Check 
diagonals\n\t\t\tif(np.array_equal(np.diag(self.board), won)): return player\n\t\t\tif(np.array_equal(np.diag(np.fliplr(self.board)), won)): return player\n\n\t\t\t# Check lines and columns\n\t\t\tfor i in range(self.boardSize):\n\t\t\t\tif(np.array_equal(self.board[i], won)): return player\n\t\t\t\tif(np.array_equal(self.board[:,i], won)): return player\n\n\t\t# Draw\n\t\tif(not(0 in self.board)): return 3\n\n\t\t# No win or draw\n\t\treturn 0", "def check_for_winner(self):\r\n\r\n # Iterate through the rows\r\n for row in range(self.height):\r\n if self.board[row][0] == self.board[row][1] == self.board[row][2] and self.board[row][0] != None:\r\n return Board.WIN if self.board[row][0] else Board.LOSS\r\n\r\n # Iterate through the columns\r\n for col in range(self.width):\r\n if self.board[0][col] == self.board[1][col] == self.board[2][col] and self.board[0][col] != None:\r\n return Board.WIN if self.board[0][col] else Board.LOSS\r\n\r\n # Diagonals\r\n if self.board[0][0] == self.board[1][1] == self.board[2][2] and self.board[0][0] != None:\r\n return Board.WIN if self.board[0][0] else Board.LOSS\r\n if self.board[0][2] == self.board[1][1] == self.board[2][0] and self.board[0][2] != None:\r\n return Board.WIN if self.board[0][2] else Board.LOSS\r\n\r\n # No winner yet\r\n return 0", "def utility(board):\n if winner(board) is None:\n return 0\n elif winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > white_count:\n return (-1, black_count, white_count)\n elif white_count > black_count:\n return (1, black_count, white_count)\n else:\n return (0, black_count, white_count)", "def utility(board):\n winning_player = winner(board)\n\n # Did X win?\n if winning_player == X:\n return 1\n\n # Did O win?\n if winning_player == O:\n return -1\n\n return 0", "def winner(board):\n for i in range(len(board)):\n\n # Check rows\n if board[i][0] == board[i][1] == board[i][2] and not board[i][1] == EMPTY:\n return board[i][1]\n\n # Check columns\n elif board[0][i] == board[1][i] == board[2][i] and not board[1][i] == EMPTY:\n return board[1][i]\n\n # Check diagonals\n if board[0][0] == board[1][1] == board[2][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n if board[2][0] == board[1][1] == board[0][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n # No winner if get to this point\n return None", "def utility(board) -> int:\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def winner(board):\n # finite list of possible wins\n winnings = [\n (0, 0), (0, 1), (0, 2), \n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n (0, 0), (1, 0), (2, 0),\n (0, 1), (1, 1), (2, 1),\n (0, 2), (1, 2), (2, 2),\n (0, 0), (1, 1), (2, 2),\n (2, 0), (1, 1), (0, 2)\n ]\n # if the board has one of the lists in winnings \n # then the piece in one of those spots is the winner\n xcount = 0\n ocount = 0\n for i in range(len(winnings)):\n if(board[winnings[i][0]][winnings[i][1]] == X):\n xcount += 1\n if(board[winnings[i][0]][winnings[i][1]] == O):\n ocount += 1\n if((i + 1) % 3 == 0):\n if(ocount == 3 or xcount == 3):\n return board[winnings[i][0]][winnings[i][1]]\n else:\n ocount = 0\n xcount = 0\n return EMPTY", "def utility(board):\n if winner(board) == X:\n return 1\n if winner(board) == O:\n return -1\n else:\n return 0", "def winner(board):\n # check columns\n for j in range(3):\n if board[1][j] == board[0][j] and 
board[0][j] == board[2][j] and board[1][j] != EMPTY:\n return board[1][j]\n # check rows\n for i in range(3):\n if board[i][0] == board[i][1] and board[i][1] == board[i][2] and board[i][0] != EMPTY:\n return board[i][0]\n # check diagnols\n if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[0][0] != EMPTY:\n return board[1][1]\n if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[0][2] != EMPTY:\n return board[1][1]\n return None", "def utility(board):\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def winner(board):\n\n # Check none empty horizontals\n for i in range(3):\n if board[i][0] and board[i][0] == board[i][1] == board[i][2]:\n return board[i][0]\n\n # Check none empty verticals\n for j in range(3):\n if board[0][j] and board[0][j] == board[1][j] == board[2][j]:\n return board[0][j]\n\n # Check none empty L-R diagonal\n if board[0][0] and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n\n # Check none empty R-L diagonal\n if board[0][2] and board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n return 0", "def check_winner(self):\n if DotsAndBoxesState.score1 > 4: # Because the total score is fixed at nine, if player's score is greater than four,\n # then the player is the winner.\n return \"A\"\n else:\n return \"B\"", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] 
== new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def winner(board) -> any:\n numeric_board = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\n total_horizon = [0, 0, 0]\n total_vertical = [0, 0, 0]\n total_diagonally = [0, 0]\n for i, row in enumerate(board):\n for j, column in enumerate(row):\n if column == X:\n numeric_board[i][j] = 1\n total_horizon[i] += 1\n total_vertical[j] += 1\n elif column == O:\n numeric_board[i][j] = -1\n total_horizon[i] += -1\n total_vertical[j] += -1\n\n n = len(numeric_board)\n total_diagonally[0] = sum(numeric_board[i][i] for i in range(n))\n total_diagonally[1] = sum(numeric_board[i][n - i - 1] for i in range(n))\n\n if 3 in total_horizon or 3 in total_vertical or 3 in total_diagonally:\n return X\n elif -3 in total_horizon or -3 in total_vertical or -3 in total_diagonally:\n return O\n else:\n return None", "def winner(board):\n\n # Check for horizontal wins\n for row in board:\n if row[0] == row[1] == row[2] and row[0] is not None:\n return row[0]\n\n # Check for vertical wins\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] and board[0][i] is not None:\n return board[0][i]\n\n # Check for diagonal wins\n if board[0][0] == board[1][1] == board[2][2] and board[0][0] is not None:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0] and board[0][2] is not None:\n return board[0][2]\n\n # If there is no winner, return None\n return None", "def utility(board):\n win = winner(board)\n if win == \"X\":\n return 1\n elif win == \"O\":\n return -1\n else:\n return 0", "def winner(board):\n if board[0][0] != EMPTY and (board[0][0] == board[0][1] == board[0][2] \n or board[0][0] == board[1][1] == board[2][2] \n or board[0][0] == board[1][0] == board[2][0]):\n return board[0][0]\n\n elif board[1][1] != EMPTY and (board[1][0] == board[1][1] == board[1][2]\n or 
board[0][1] == board[1][1] == board[2][1]):\n return board[1][1]\n \n elif board[2][2] != EMPTY and (board[0][2] == board[1][2] == board[2][2]\n or board[2][0] == board[2][1] == board[2][2]):\n return board[2][2]\n \n elif board[2][0] != EMPTY and (board[2][0] == board[1][1] == board[0][2]):\n return board[2][0]\n \n else:\n None", "def winner(board):\n # return 0[[0EMPTY, 1EMPTY, 2EMPTY],\n # 1[EMPTY, EMPTY, EMPTY],\n # 2[EMPTY, EMPTY, EMPTY]]\n # Check columns\n if board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n # Check rows\n elif all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n # Check diagonals\n elif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board [0][2]\n else:\n return None", "def winner(board):\r\n\r\n #rows:\r\n if (board[0][0] == board[0][1] == board[0][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[1][0] == board[1][1] == board[1][2]) and (board[1][0] == \"X\" or board[1][0] == \"O\"):\r\n return board[1][0]\r\n if (board[2][0] == board[2][1] == board[2][2]) and (board[2][0] == \"X\" or board[2][0] == \"O\"):\r\n return board[2][0]\r\n\r\n #columns\r\n if (board[0][0] == board[1][0] == board[2][0]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][1] == board[1][1] == board[2][1]) and (board[0][1] == \"X\" or board[0][1] == \"O\"):\r\n return board[0][1]\r\n if (board[0][2] == board[1][2] == board[2][2]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n\r\n #diagonals\r\n if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n \r\n return None\r\n\r\n raise NotImplementedError", "def has_winner(self):\r\n\r\n\t\t\"Check for horizonal win\"\r\n\r\n\t\tfor x in range(0, 3):\r\n\r\n\t\t\tif self.game_board[x][0] == self.game_board[x][1] and self.game_board[x][1] == self.game_board[x][2]:\r\n\r\n\t\t\t\treturn self.game_board[x][0]\r\n\r\n\t\t\"Check for vertical win\"\r\n\r\n\t\tfor y in range(0, 3):\r\n\r\n\t\t\tif self.game_board[0][y] == self.game_board[1][y] and self.game_board[1][y] == self.game_board[2][y]:\r\n\r\n\t\t\t\treturn self.game_board[0][y]\r\n\r\n\t\t\"Check for diagonal from left to right\"\r\n\t\r\n\t\tif self.game_board[0][0] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][2]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.game_board[0][2] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][0]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.count == 8:\r\n\r\n\t\t\treturn \"Tie\"\r\n\r\n\t\telse:\r\n\r\n\t\t\treturn \"0\"\r\n\r\n\r\n\t\tpass", "def utility(board):\n winning_player = winner(board)\n\n if winning_player is X:\n return 1\n if winning_player is O:\n return -1\n \n return 0", "def winner(self, board):\n if self.any_legal_move(BLACK, board) or self.any_legal_move(WHITE,board):\n return None\n scoreBlack = 
self.score(BLACK, board)\n scoreWhite = self.score(WHITE, board)\n if scoreBlack > scoreWhite: return PLAYERS[BLACK]\n elif scoreBlack < scoreWhite: return PLAYERS[WHITE]\n else: return TIE", "def winner(board):\n \n for m in [\"XXX\", \"OOO\"]:\n # horizontal\n for row in range(3):\n if board[row][0] == board[row][1] == board[row][2]:\n return board[row][0]\n # vertical\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n # diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n return board[1][1]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[1][1]\n return None", "def winner(board):\n\t#For X\n\tiswinnerX = winnerForPlayer(board, X)\n\tiswinnerO = winnerForPlayer(board, O)\n\n\tif iswinnerX:\n\t\treturn X\n\tif iswinnerO:\n\t\treturn O\n\n\treturn None", "def winner(self, board):\n # Cek baris\n if all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n \n # Cek kolom\n elif board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n \n # Cek diagonal\n elif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board[0][2]\n else:\n return None", "def winner(board):\n # Check Rows\n for row in board:\n if row[0] != EMPTY and row[0] == row[1] and row[0] == row[2]:\n return row[0]\n \n # Check Columns\n for j in range(3):\n if board[0][j] != EMPTY and board[0][j] == board[1][j]:\n if board[0][j] == board[2][j]:\n return board[0][j]\n \n # Check Diagonals\n if board[1][1] != EMPTY:\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n return board[0][2]\n\n return None", "def test_get_winner():\n\n board = Board()\n board1 = Board()\n board2 = Board()\n\n # board is empty\n board.find_winner(0)\n assert board.get_winner() == board.EMPTY\n\n # vertical win\n for i in range(4):\n board.move(Board.P1, 1)\n\n for i in range(3):\n board.move(Board.P2, 2)\n board.find_winner(1)\n assert board.get_winner() == board.P1\n\n \"\"\"\n Board looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|O|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|O|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # horizontal win\n for i in range(4):\n board1.move(Board.P2, i)\n for i in range(3):\n board1.move(Board.P1, 1)\n board1.find_winner(2)\n assert board1.get_winner() == board.P2\n\n \"\"\"\n Board1 looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|O|O|O|O|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # diagonal win\n\n board2.move(Board.P1, 1)\n board2.move(Board.P2, 2)\n\n board2.move(Board.P1, 2)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 4)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 3)\n board2.move(Board.P2, 5)\n\n board2.move(Board.P1, 4)\n 
board2.move(Board.P2, 4)\n\n board2.move(Board.P1, 4)\n\n board2.find_winner(1)\n assert board2.get_winner() == board.P1\n\n \"\"\"\n Board 2 looks like\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|X|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|-|X|O|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|X|O|X|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|O|X|O|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"", "def winner(board):\n if board[0][0] == board[0][1] == board[0][2] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n elif board[1][0] == board[1][1] == board[1][2] != EMPTY:\n if board[1][0] == X:\n return X\n else:\n return O\n\n elif board[2][0] == board[2][1] == board[2][2] != EMPTY:\n if board[2][0] == X:\n return X\n else:\n return O\n\n elif board[0][0] == board[1][0] == board[2][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n elif board[0][1] == board[1][1] == board[2][1] != EMPTY:\n if board[0][1] == X:\n return X\n else:\n return O\n\n elif board[0][2] == board[1][2] == board[2][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n elif board[0][0] == board[1][1] == board[2][2] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n elif board[0][2] == board[1][1] == board[2][0] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n else:\n return None\n\n #raise NotImplementedError", "def winner(board):\n for i in range(3):\n if board[i][0] == board[i][1] == board[i][2] != None:\n return board[i][0]\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] != None:\n return board[0][i]\n if board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]\n return None", "def check_for_winner():\n global winner\n row_winner = check_rows()\n column_winner = check_columns()\n diagonal_winner = check_diagonals()\n if row_winner:\n winner = row_winner\n elif column_winner:\n winner = column_winner\n elif diagonal_winner:\n winner = diagonal_winner\n else:\n winner = None", "def getUtility(board):\n winner = getWinner(board)\n if winner == X:\n return 1\n if winner == O:\n return -1\n return 0", "def check_for_winner(self):\n\n winner = self.__winner_in_row__()\n winner = self.__winner_in_column__()\n winner = self.__winner_in_diag__()\n\n if winner is None:\n winner = self.__boardFull__()\n \n return winner", "def winner(board):\n \n possible_wins = []\n row1 = board[0]\n row2 = board[1]\n row3 = board[2]\n col1 = [board[0][0],board[1][0],board[2][0]]\n col2 = [board[0][1],board[1][1],board[2][1]]\n col3 = [board[0][2],board[1][2],board[2][2]]\n diag1 = [board[0][0],board[1][1],board[2][2]]\n diag2 = [board[2][0],board[1][1],board[0][2]]\n \n possible_wins.append(row1)\n possible_wins.append(row2)\n possible_wins.append(row3)\n possible_wins.append(col1)\n possible_wins.append(col2)\n possible_wins.append(col3)\n possible_wins.append(diag1)\n possible_wins.append(diag2)\n \n for trait in possible_wins:\n if trait.count(\"X\") == 3:\n return \"X\"\n elif trait.count(\"O\") == 3:\n return \"O\"\n \n return None", "def game_value(self, state):\r\n # check horizontal wins\r\n for row in state:\r\n for i in range(2):\r\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\r\n return 1 if row[i] == self.my_piece else -1\r\n # check col wins\r\n for col in range(5):\r\n for i in range(2):\r\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == 
state[i+3][col]:\r\n return 1 if state[i][col] == self.my_piece else -1\r\n #check diag up wins\r\n for x in range(2):\r\n for y in range(2):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y+1] == state[x+2][y+2] == state[x+3][y+3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check diag down wins\r\n for x in range(2):\r\n for y in range(3, 5):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y-1] == state[x+2][y-2] == state[x+3][y-3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check square box wins \r\n for x in range(4):\r\n for y in range(4):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y] == state[x][y+1] == state[x+1][y+1]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n\r\n return 0 # no winner yet\r", "def check_for_winner():\n global winner\n # Check rows.\n row_winner = check_rows()\n # Check columns.\n column_winner = check_columns()\n # Check diagonals.\n diagonal_winner = check_diagonals()\n if row_winner:\n winner = row_winner\n elif column_winner:\n winner = column_winner\n elif diagonal_winner:\n winner = diagonal_winner\n else:\n winner = None", "def winner(board):\n # Checking for 3 in a row\n for row in board:\n if row[0] is not EMPTY and row[0] == row[1] == row[2]:\n return row[0]\n\n # Checking for 3 in a col\n for col in range(len(board)):\n if board[0][col] is not EMPTY and board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n\n # Checking for Diagonals\n if board[0][0] is not EMPTY and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n \n if board[0][2] is not EMPTY and board[0][2] == board[2][0] == board[1][1]:\n return board[0][2]\n\n return None", "def evaluate_board(self, board):\n \n win_score = 100\n win_or_loss_score = 50\n lose_score = 0\n \n if board.win_for(self.opponent()):\n return lose_score\n if board.win_for(self.side):\n return win_score\n if not board.win_for(self.side) or not board.win_for(self.opponent()):\n return win_or_loss_score", "def utility(board):\n \n if winner(board) == 'X':\n return 1\n elif winner(board) == 'O':\n return -1\n else :\n return 0", "def winner(board):\n for i in (O, X):\n for j in range(3):\n if (board[j][0] == i and board[j][1] == i and board[j][2] == i):\n return i\n if (board[0][j] == i and board[1][j] == i and board[2][j] == i):\n return i\n if (board[0][0] == i and board[1][1] == i and board[2][2] == i):\n return i\n if (board[2][0] == i and board[1][1] == i and board[0][2] == i):\n return i\n return None", "def winner(board):\n # Horizontal win check\n\n for i in board:\n if i[0] == i[1] and i[0] == i[2] and i[0] != EMPTY:\n if i[0] == X:\n return X\n else:\n return O\n \n # Vertical win check\n \n if board[0][0] == board[1][0] and board[0][0] == board[2][0]:\n if board[0][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n if board[0][1] == board[1][1] and board[0][1] == board[2][1]:\n if board[0][1] != EMPTY:\n if board[0][1] == X:\n return X\n else:\n return O\n if board[0][2] == board[1][2] and board[0][2] == board[2][2]:\n if board[0][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n # Diagonal win check\n\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n if board[0][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n if board[0][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n return None", "def test_winner_won(self):\n board = self.board_class()\n 
board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def utility(board):\n # if game is over (tie/winner) decide who won\n if(terminal(board)):\n if(winner(board) == X):\n return 1\n elif(winner(board) == O):\n return -1\n else:\n return 0", "def winner(board):\n chances = [X, O]\n for chance in chances:\n for row in range(3):\n if list(chance)*3 == board[row]:\n return chance\n for column in range(3):\n if [[chance] for i in range(3)] == [[board[row][column]] for row in range(3)]:\n return chance\n if board[0][0] == chance and board[1][1] == chance and board[2][2] == chance:\n return chance\n if board[0][2] == chance and board[1][1] == chance and board[2][0] == chance:\n return chance\n return None", "def getWinner(self):\n global vertical_win\n global done\n lines = (\n self.board, # columns\n zip(*self.board), # rows\n diagonalsPos(self.board, self.cols, self.rows), # positive diagonals\n diagonalsNeg(self.board, self.cols, self.rows) # negative diagonals\n )\n\n for sublist in self.board:\n if sublist[0] == sublist[1] == sublist[2] == sublist[3] or sublist[1] == sublist[2] == sublist[3] == \\\n sublist[4] or sublist[2] == sublist[3] == sublist[4] == sublist[5]:\n vertical_win = True\n\n for line in chain(*lines):\n for color, group in groupby(line):\n if color != 0 and len(list(group)) >= self.win:\n done = True\n return color\n counter = 0\n for sublist in self.board:\n for i in sublist:\n if i != 0:\n counter += 1\n if counter == 42:\n done = True\n return Tie", "def determine_winner(self):\r\n for i in range(2):\r\n # Way of the Stone (capture opponent master)\r\n if not self.bitboard_king[i]:\r\n return 1 - i * 2\r\n # Way of the Stream (move master to opposite square)\r\n if self.bitboard_king[i] == self.WIN_BITMASK[i]:\r\n return i * 2 - 1\r\n return 0", "def winner(board):\n d_board = np.array(board)\n board_list = d_board.tolist()\n if board[0][0] == \"O\" and board[1][1] == \"O\" and board[2][2] == \"O\":\n return O\n elif board[0][0] == \"X\" and board[1][1] == \"X\" and board[2][2] == \"X\":\n return X\n elif board[0][2] == \"O\" and board[1][1] == \"O\" and board[2][0] == \"O\":\n return O\n elif board[0][2] == \"X\" and board[1][1] == \"X\" and board[2][0] == \"X\":\n return X\n else:\n for row in board_list:\n if row.count(\"X\") == 3:\n return X\n elif row.count(\"O\") == 3:\n return O\n board_list = d_board.transpose().tolist()\n for row in board_list:\n if row.count(\"X\") == 3:\n return X\n elif row.count(\"O\") == 3:\n return O\n return None\n\n #raise NotImplementedError", "def winner(board):\n # check rows\n for row in 
range(len(board)):\n if board[row][0] == board[row][1] == board[row][2]:\n if board[row][0] == X:\n return X\n elif board[row][0] == O:\n return O\n else:\n return None\n \n # check cols\n for col in range(len(board[0])):\n if board[0][col] == board[1][col] == board[2][col]:\n if board[0][col] == X:\n return X\n elif board[0][col] == O:\n return O\n else:\n return None\n\n # Check diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n if board[0][0] == X:\n return X\n elif board[0][0] == O:\n return O\n else:\n return None\n\n # Check diagonal\n if board[0][2] == board[1][1] == board[2][0]:\n if board[0][2] == X:\n return X\n elif board[0][2] == O:\n return O\n else:\n return None\n\n # else\n return None", "def winner(board):\n for i in range(3):\n firstnumber=board[i][0]\n if firstnumber!=EMPTY:\n secondnumber=board[i][1]\n if secondnumber==firstnumber:\n if board[i][2]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n for i in range(3):\n firstnumber=board[0][i]\n if firstnumber!=EMPTY:\n secondnumber=board[1][i]\n if secondnumber==firstnumber:\n if board[2][i]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n firstnumber=board[0][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[2][2]==firstnumber:\n return firstnumber\n firstnumber=board[2][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[0][2]==firstnumber:\n return firstnumber\n return None\n raise NotImplementedError", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # TODO: check \\ diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col+1] == state[i+2][col+2] == state[i+3][col+3]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check / diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col+3] != ' ' and state[i][col+3] == state[i+1][col+2] == state[i+2][col+1] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check 2x2 box wins\n for col in range(4):\n for i in range(4):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i][col+1] == state[i+1][col+1]:\n return 1 if state[i][col]==self.my_piece else -1\n \n return 0 # no winner yet", "def utility(board):\n\tif winner(board) == X:\n\t\treturn 1\n\n\tif winner(board) == O:\n\t\treturn -1\n\n\treturn 0", "def winner(board):\n for row in range(len(board)):\n if board[row].count(X) == 3:\n return X\n elif board[row].count(O) == 3:\n return O\n for column in range(len(board[row])):\n if board[0][column] == X and board[1][column] == X and board[2][column] == X:\n return X\n elif board[0][column] == O and board[1][column] == O and board[2][column] == O:\n return O\n\n if (board[0][0] == X and board[1][1] ==X and board[2][2] ==X) or (board[0][2] == X and board[1][1]==X and board[2][0] ==X):\n return X\n\n elif (board[0][0] == O and board[1][1] == O and board[2][2] == O) or (board[0][2] == O and board[1][1]== O and board[2][0] ==O):\n return O\n\n else: return None\n\n\n\n # raise NotImplementedError", "def utility(board):\n 
if winner(board)== X:\n return 1\n elif winner(board)== O:\n return -1\n else: return 0\n\n # raise NotImplementedError", "def _check_winner(self) -> Optional[int]:\n\n if self._is_red_active:\n temp_board = self.board_array.clip(min=0, max=1) # Turns all -1's into 0's\n for kernel in self._detection_kernels_red:\n # For each of the patterns that produce a win, do a 2d convolution on the copy\n # of the board. If there 4's in the resulting array, one of the kernels found a\n # match and so there is a 4 in a row somewhere. This is done this way to save\n # as much time as possible, because optimisation is very important to the\n # AI's performance and python is already quite slow\n if np.any(convolve2d(temp_board, kernel, mode='valid') == 4):\n return 1\n else:\n temp_board = self.board_array.clip(min=-1, max=0) # Turns all 1's into 0's\n for kernel in self._detection_kernels_yellow:\n # Same as before\n if np.any(convolve2d(temp_board, kernel, mode='valid') == 4):\n return -1\n\n if len(self._valid_moves) == 0:\n return 0\n\n return None", "def checkWinner(self):\n\n # Check horizontal and vertical lines.\n for j in range(3):\n if self.board[0][j] != \" \" and self.board[0][j] == self.board[1][j] == self.board[2][j]:\n return self.board[0][j]\n if self.board[j][0] != \" \" and self.board[j][0] == self.board[j][1] == self.board[j][2]:\n return self.board[j][0]\n\n # Check diagonals.\n if self.board[0][0] != \" \" and self.board[0][0] == self.board[1][1] == self.board[2][2]:\n return self.board[0][0]\n elif self.board[0][2] != \" \" and self.board[0][2] == self.board[1][1] == self.board[2][0]:\n return self.board[0][2]\n\n # Check if there is a tie.\n if self.empty_cells == 0:\n return \"Tie\"\n\n # No winner yet.\n return None", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # check \\ diagonal wins\n for i in range(2):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check / diagonal wins\n for i in range(3,5):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check diamond wins\n for i in range(3):\n for j in range(1,4):\n if state[i+1][j] == ' ' and state[i][j]!= ' ' and state[i][j] == state[i+1][j-1] == state[i+1][j+1] == state[i+2][j]:\n return 1 if state[i][j]==self.my_piece else -1\n\n return 0 # no winner yet", "def check_game_status2(board):\n board = np.array(board)\n for i in range(7):\n for j in range(6):\n if checkWin(board, j, i, 1):\n return 1\n if checkWin(board, j, i, 2):\n return 2\n if isfull(board):\n return 0\n return -1", "def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0", "def test_is_winner(self):\n TestGame = TTT_Game(\"John\", \"Jane\")\n\n # Check verticals\n TestGame.board = [1, 2, \"X\", 4, 5, \"X\", 7, 8, \"X\"]\n 
self.assertTrue(TestGame.is_winner(\"X\"))\n TestGame.board = [\"O\", 2, 3, \"O\", 5, \"X\", \"O\", 8, \"X\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, \"X\", \"O\", \"O\", \"X\", 6, 7, \"X\", \"X\"]\n self.assertTrue(TestGame.is_winner(\"X\"))\n\n # Check horizontals\n TestGame.board = [\"O\", \"O\", \"O\", \"O\", 5, \"X\", 7, 8, 9]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, 2, 3, \"X\", \"X\", \"X\", 7, 8, 9]\n self.assertTrue(TestGame.is_winner(\"X\"))\n TestGame.board = [1, 2, 3, \"O\", 5, 6, \"O\", \"O\", \"O\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n\n # Check diagonals\n TestGame.board = [\"O\", \"X\", 3, 4, \"O\", \"X\", \"X\", \"O\", \"O\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, 2, \"X\", 4, \"X\", 6, \"X\", 8, 9]\n self.assertTrue(TestGame.is_winner(\"X\"))", "def utility(board):\n game_winner = \"\"\n #I will analyze every row first\n for i in range(0,3):\n #I check vertically and horizontally if the tokens are the same, meaning any of the two players has 3 in a row.\n if (board[i][0] == board[i][1] and board[i][0] == board[i][2] and (board[i][0] is not EMPTY)):\n #if I find a match vertically, I determine there was a winner and break the for cycle.\n game_winner = board[i][0]\n break\n elif (board[0][i] == board[1][i] and board[0][i] == board[2][i] and (board[0][i] is not EMPTY)):\n #if there is a match horizontally, I determine there was a winner and break the for cycle.\n game_winner = board[0][i]\n break\n #checking diagonals in case there were no winners neither vertically nor horizontally.\n if ((board[0][0] == board[1][1] and board[2][2] == board[0][0]) or (board[0][2] == board[1][1] and board[2][0] == board[0][2])) and (board[1][1] is not EMPTY):\n game_winner = board[1][1]\n #depending on my winning token, I will determine the value I should print. 
\n if game_winner == \"X\":\n return 1\n elif game_winner == \"O\":\n return -1\n #Since we are assuming we will only receive terminal boards, if no winner was found, we have a tie and should return 0.\n else:\n return 0", "def determine_winner ( self ):\n\n if self.columnWin() != None:\n return self.columnWin()\n\n elif self.diagonalWin() != None:\n return self.diagonalWin()\n\n elif self.rowWin() != None:\n return self.rowWin()\n\n else:\n return None", "def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None", "def get_winner(self):\n \n current_board_state = self.get_board_state()\n current_board_state_winner = self.state_winner_map.get(current_board_state)\n if current_board_state_winner:\n return current_board_state_winner\n\n # At-least 5 moves must have been played for a possible win.\n if len(self.available_moves) > 4:\n return None\n\n winner = None\n\n for i in range(0, 3):\n row = [self.board['{}{}'.format(i, j)] for j in range(0, 3)]\n if self._all_elem_same(row):\n winner = row[0]\n break\n\n if not winner:\n for i in range(0, 3):\n row = [self.board['{}{}'.format(j, i)] for j in range(0, 3)]\n if self._all_elem_same(row):\n winner = row[0]\n break\n\n if not winner:\n row = [self.board['{}{}'.format(i, i)] for i in range(0, 3)]\n if self._all_elem_same(row):\n winner = row[0]\n\n if not winner:\n row = [self.board['{}{}'.format(2-i, i)] for i in range(0, 3)]\n if self._all_elem_same(row):\n winner = row[0]\n\n self.state_winner_map[current_board_state] = winner\n return winner", "def winner(board):\n columns = []\n for row in board:\n xcount = row.count(X)\n ocount = row.count(O)\n if xcount == 3:\n return X\n if ocount == 3:\n return O\n\n for j in range(len(board)):\n column = [row[j] for row in board]\n columns.append(column)\n \n for j in columns:\n xcounter = j.count(X)\n ocounter = j.count(O)\n if xcounter == 3:\n return X\n if ocounter == 3:\n return O\n \n if board[0][0] == O and board[1][1] == O and board[2][2] == O:\n return O\n if board[0][0] == X and board[1][1] == X and board[2][2] == X:\n return X\n if board[0][2] == O and board[1][1] == O and board[2][0] == O:\n return O\n if board[0][2] == X and board[1][1] == X and board[2][0] == X:\n return X\n\n return None", "def winner_found(self):\n\n first_row = self.find_three_in_row([self._board[0][0], self._board[0][1], self._board[0][2]])\n second_row = self.find_three_in_row([self._board[1][0], self._board[1][1], self._board[1][2]])\n third_row = self.find_three_in_row([self._board[2][0], self._board[2][1], self._board[2][2]])\n winner_in_rows = first_row or second_row or third_row\n\n first_column = self.find_three_in_row([self._board[0][0], self._board[1][0], self._board[2][0]])\n second_column = self.find_three_in_row([self._board[0][1], self._board[1][1], self._board[2][1]])\n third_column = self.find_three_in_row([self._board[0][2], self._board[1][2], self._board[2][2]])\n winner_in_columns = first_column or second_column or third_column\n\n first_diagonal = self.find_three_in_row([self._board[0][0], self._board[1][1], 
self._board[2][2]])\n second_diagonal = self.find_three_in_row([self._board[2][0], self._board[1][1], self._board[0][2]])\n winner_in_diagonals = first_diagonal or second_diagonal\n\n return winner_in_rows or winner_in_columns or winner_in_diagonals", "def check_winner(board):\n winner = get_winner(board)\n if winner:\n print(f\"Game Over, You Win\") if winner == \"X\" else print(\"Game Over, You Loose\") # noqa\n return winner", "def winner(board):\r\n A = board[0]\r\n B = board[1]\r\n C = board[2]\r\n\r\n if A.count(\"X\") == 3 or B.count(\"X\") == 3 or C.count(\"X\") == 3:\r\n return X\r\n elif A.count(\"O\") == 3 or B.count(\"O\") == 3 or C.count(\"O\") == 3:\r\n return O\r\n elif A[0] == B[0] and A[0] == C[0]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[1] == B[1] and A[1] == C[1]:\r\n if A[1] == X:\r\n return X\r\n elif A[1] == O:\r\n return O\r\n elif A[2] == B[2] and A[2] == C[2]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n elif A[0] == B[1] and A[0] == C[2]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[2] == B[1] and A[2] == C[0]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n else:\r\n return None", "def check_over(self):\n if self.board.has_winner() == 1:\n return 1\n elif self.board.has_winner() == 2:\n return 2\n elif self.board.check_cats_game():\n return 0\n else:\n return -1", "def utility(board):\r\n\r\n if winner(board) == \"X\":\r\n return 1\r\n elif winner(board) == \"O\":\r\n return -1\r\n else:\r\n return 0\r\n\r\n raise NotImplementedError", "def utility(self, board):\n if self.terminal(board):\n winner_side = self.winner(board)\n # Cek winner apakah X, O, atau seri\n if winner_side == self.X:\n return 1\n elif winner_side == self.O:\n return -1\n else:\n return 0", "def get_winner(state):\n state_val = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n if state_val == 100:\n return state.action_player\n elif len(state.available_moves) == 0:\n return 0\n else:\n return -1", "def utility(board):\n if terminal(board):\n try:\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0\n except:\n raise NotImplementedError", "def player(board):\n if board == initial_state():\n return X\n \n # if board has lesser or eual X(s) than O(s)\n if sum([row.count(X) for row in board]) <= sum([row.count(O) for row in board]):\n return X\n else:\n return O", "def check_winner(board_state):\n if board_state[0] == board_state[1] == board_state[2] and board_state[0] != \" \":\n return True\n elif board_state[0] == board_state[4] == board_state[8] and board_state[0] != \" \":\n return True\n elif board_state[0] == board_state[3] == board_state[6] and board_state[0] != \" \":\n return True\n elif board_state[3] == board_state[4] == board_state[5] and board_state[3] != \" \":\n return True\n elif board_state[6] == board_state[7] == board_state[8] and board_state[6] != \" \":\n return True\n elif board_state[6] == board_state[4] == board_state[2] and board_state[6] != \" \":\n return True\n elif board_state[7] == board_state[4] == board_state[1] and board_state[7] != \" \":\n return True\n elif board_state[8] == board_state[5] == board_state[2] and board_state[8] != \" \":\n return True\n else:\n return False", "def calc_winner(self):\n pass", "def terminal_test(self, board):\r\n blackScore = board.count(core.BLACK)\r\n whiteScore = board.count(core.WHITE)\r\n if blackScore > whiteScore:\r\n return 
core.PLAYERS[core.BLACK]\r\n elif blackScore < whiteScore:\r\n return core.PLAYERS[core.WHITE]\r\n else:\r\n return \"TIE\"", "def winner(self):\n if (self.player):\n return (0 == reduce(lambda x, y: x+y, self.board.p1vec))\n else:\n return (0 == reduce(lambda x, y: x+y, self.board.p2vec))", "def player(board):\n\tif board == initial_state():\n\t\treturn X\n\n\tnumX=0\n\tnumO=0\n\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif(board[i][j]==X):\n\t\t\t\tnumX+=1\n\t\t\telif(board[i][j]==O):\n\t\t\t\tnumO+=1\n\n\tif numX > numO:\n\t\treturn O\n\telse:\n\t\treturn X", "def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0" ]
[ "0.8226638", "0.7735134", "0.7697947", "0.7688143", "0.76567584", "0.7635804", "0.7614953", "0.76082647", "0.76045525", "0.7571411", "0.7569988", "0.7560676", "0.7552569", "0.7549131", "0.7542866", "0.75107414", "0.7508619", "0.75054234", "0.7480221", "0.7479761", "0.747914", "0.74698186", "0.7465634", "0.7464308", "0.74584675", "0.7433022", "0.7433022", "0.7433022", "0.7433022", "0.7433022", "0.7427477", "0.7411578", "0.7410344", "0.74095523", "0.73955005", "0.73840845", "0.7380923", "0.737911", "0.7379025", "0.73727703", "0.7360985", "0.7359514", "0.73577684", "0.73281115", "0.7321851", "0.73086685", "0.7306401", "0.7305002", "0.72903174", "0.72790134", "0.72788733", "0.7271748", "0.72664523", "0.7265881", "0.726381", "0.7259189", "0.7253892", "0.72527725", "0.7238967", "0.7238222", "0.7221437", "0.72210515", "0.7219367", "0.7217022", "0.719472", "0.71881443", "0.71878934", "0.7185329", "0.7175065", "0.7162913", "0.7159632", "0.7121786", "0.7101828", "0.7098844", "0.7071078", "0.706496", "0.70293105", "0.7017271", "0.70069", "0.7004533", "0.69889313", "0.69871396", "0.6985534", "0.69784087", "0.6972522", "0.69672406", "0.69496906", "0.6940827", "0.6939632", "0.69365436", "0.69339365", "0.6929293", "0.6925551", "0.6919121", "0.6916099", "0.69126254", "0.6909445", "0.68921715", "0.68469346", "0.6833594" ]
0.76951003
3
This function counts the points and returns
def count_points(p1,p2):
    if p1 > p2:
        drawWinner(1)
        return 1
    elif p2 > p1:
        drawWinner(2)
        return 2
    else:
        drawWinner(3)
        return 3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Points_Counting(self):\n return len(self.__traectory_list)", "def nr_points(self):\n return len(self.x)", "def count():", "def countPoints(self,sumation):\n if sumation == 21:\n points = 7\n elif sumation == 20:\n points = 5\n elif sumation == 19:\n points = 4\n elif sumation == 18:\n points = 3\n elif sumation == 17:\n points = 2\n elif sumation <=16:\n points = 1\n else:\n points = 0\n return points", "def getNumPoints(self, l1, l2):\n n1 = self.pointcounts[l1]\n n2 = self.pointcounts[l2]\n self.pointcounts[('Cl_%d' % self.labelcount, l1, l2)] = n1 + n2\n return (n1, n2)", "def get_points_number(self):\n ncontour = self.get_contours_number\n npoints = []\n for i in range(0, ncontour):\n npoints.append(len(self.x[i]))\n return npoints", "def GetNumberOfPoints(self):\n return self.GetNumberOfElements(ArrayAssociation.POINT)", "def count_points(roi):\r\n # Performing Mean Shift Filtering\r\n shifted = cv2.pyrMeanShiftFiltering(roi, 21, 51)\r\n\r\n # Converting the image to grayscale\r\n gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\r\n\r\n # Thresholding using Binary and OTSU\r\n thrsh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\r\n # Using Watershed Algorithm\r\n D = ndimage.distance_transform_edt(thrsh)\r\n localMax = peak_local_max(D, indices=False, min_distance=1, labels=thrsh)\r\n markers = ndimage.label(localMax)[0]\r\n lbls = watershed(-D, markers, mask=thrsh)\r\n \r\n return lbls, len(np.unique(lbls)) - 1", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def grid_point_count(self):\n return pytools.product(self.grid_point_counts())", "def point_count(N, S):\n\n x, y = make_grid(N)\n\n xc, yc = np.zeros_like(x), np.zeros_like(y)\n # grids for holding result of mandelbrot check\n \n z_binary = np.zeros( (N, N) )\n z_density = np.zeros( (N, N) )\n\n for (xi, i) in zip(x, xrange(N)):\n for (yi, j) in zip(y, xrange(N)):\n\n z = 0 ; s = 0\n c = complex( xi , yi ) \n abs_z = np.sqrt( z*z.conjugate() )\n # initial values for z, c, |z|, and step count\n\n for k in xrange(S):\n\n if abs_z > 2:\n break\n else:\n z_prim = z*z + c\n abs_z = np.sqrt( z_prim*z_prim.conjugate() )\n z = z_prim \n s += 1\n z_density[j, i] += 1\n \n\n\n if abs_z < 2:\n z_binary[j, i] = 1\n \n return z_binary, z_density", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def numberOfPoints(self):\n return 20000", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def _get_keypoints_center_count(self, keypoints):\n keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))\n keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))\n num = float(np.sum(keypoints[:, 0, 1]))\n return np.array([keypoint_x / num, keypoint_y / num]), num", "def __len__(self):\n return self.num_points", "def num_quadrature_points(self) -> int:", "def nr_of_bees_at(self, point):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception\n \n point = point.copy()\n \n if 
self.phase == 2:\n point -= self.position\n elif self.phase > 2:\n point = self.transform(point - self.position)\n\n return sum(map(lambda x: array_equal(point,x),pos))", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def count_constellations(points):\n\n num_points = len(points)\n edges = np.zeros((num_points, num_points), np.bool)\n for i in range(num_points):\n edges[i, i] = True\n point_i = points[i]\n for j in range(i+1, num_points):\n edges[i, j] = (point_i - points[j]) <= THRESHOLD\n edges[j, i] = edges[i, j]\n\n visited = set()\n constellations = []\n for i in range(num_points):\n if i in visited:\n continue\n\n constellations.append(build_constellation(edges, i, visited))\n\n return len(constellations)", "def count_segments_naive(self, starts, ends, points):\r\n count = [0] * len(points)\r\n \r\n for i in range(len(points)):\r\n for j in range(len(starts)):\r\n if starts[j] <= points[i] <= ends[j]:\r\n count[i] += 1\r\n \r\n return count", "def n_points(self) -> ir.IntegerValue:\n return ops.GeoNPoints(self).to_expr()", "def n_points(self):\n return self.points.shape[0]", "def sort_and_count_segments(self, starts, ends, points):\r\n \r\n # Cons: needs lot of memeory space\r\n lst = []\r\n for i in range(len(starts)): \r\n lst.append(range(starts[i], ends[i]+1))\r\n \r\n # store all the items in list\r\n lst_2 = []\r\n for sublist in lst:\r\n for item in sublist:\r\n lst_2.append(item)\r\n \r\n sorted_lst_2 = sorted(lst_2) # get sorted list\r\n \r\n count = [0] * len(points)\r\n \r\n # find item via binary search and count the occuranace of the item.\r\n for i in range(len(points)):\r\n if self.binary_search_for_count_segments(sorted_lst_2, points[i]) == points[i]:\r\n count[i] += sorted_lst_2.count(points[i])\r\n \r\n return count", "def points(self):\r\n\t\tif self.rank() >= 9:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn 0", "def __len__(self):\n return len(self.points)", "def __len__(self):\n return len(self.points)", "def getNumberPoints(self, move):\r\n (current_point_white, current_point_black) = self._board.get_nb_pieces()\r\n self._board.push(move)\r\n (new_point_white, new_point_black) = self._board.get_nb_pieces()\r\n self._board.pop()\r\n \r\n if(self._mycolor == 1): #black\r\n return (new_point_black-current_point_black) \r\n else:\r\n return (new_point_white-current_point_white)", "def num_points(self, f=None):\n if f is not None:\n return f(self.contexts.shape[0])\n return self.contexts.shape[0]", "def setNumberOfPoints(self):\n return self.numberOfPoints()", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def __len__(self):\n return len(self._points)", "def count() -> int:\n pass", "def n_points(self) -> int:\n return len(self.all_df)", "def calDominationCount(p,visitedPoints):\n isDominated = utils.MultiThread(utils.dominating, zip([visitedPoints[k].mean for k in visitedPoints],repeat(p.mean)))\n dominationCount = sum(isDominated)\n print('Please _cutils.calDominantionCount(). 
This method is too slow.')\n return dominationCount", "def get_number_of_data_points(self):\n\n log.warning(\n \"get_number_of_data_points not implemented, values for statistical measurements such as AIC or BIC are \"\n \"unreliable\",\n )\n\n return 1.0", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def n_points(self) -> int:\n return len(self.df)", "def num_points_in_circle(d):\n return 6 * d if d > 0 else 1", "def total_points(self, **kwargs):\n points = 0.0\n for key, value in self.stat_data.items():\n points = points + STATS[key][1](value)\n return round(points, self.__class__.default_round)", "def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1", "def get_pts(dat):\n return np.where(np.abs(np.diff(dat)) > 0)[0]+1", "def aces_points(dice_list):\n return dice_list.count(1) * 1", "def count_points_near_vertices(\n t, coords, bruteforce_simplex_counts=False,\n count_bincount=True, **kwargs):\n flat_tri = t.simplices.flatten()\n flat_ind = np.repeat(np.arange(t.nsimplex), 3)\n v_touches = []\n for i in range(t.npoints):\n v_touches.append(flat_ind[np.argwhere(flat_tri == i)])\n found = t.find_simplex(coords, bruteforce=bruteforce_simplex_counts)\n if count_bincount:\n bc = np.bincount(found, minlength=t.nsimplex)\n pt_count = np.array([\n bc[v_touches[i]].sum() for i in range(t.npoints)\n ])\n else:\n pt_count = np.zeros(t.npoints)\n for i in range(t.npoints):\n for j in v_touches[i]:\n pt_count[i] += np.count_nonzero(found == j)\n return pt_count", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def fours_points(dice_list):\n return dice_list.count(4) * 4", "def count(self, qid):\n\n bbox = (\n self.to_frame()\n .query(f\"id == '{qid}'\")\n .geometry.bounds.values.flatten()\n .tolist()\n )\n\n # Get points that intersect the quadrant\n point_int = list(self.sindex.intersection(bbox))\n\n return len(point_int) if point_int else 0", "def cnt_points(self, field:str, all_ghost_out:bool):\r\n\r\n eat_dot = False\r\n\r\n # When field is 'e' that means Pac-Man ate an energizer, so he can now eat ghosts and +50 will be added to his point counter\r\n if field == 'e':\r\n self.play_powerpellet()\r\n eat_dot = True\r\n self.eat_ghost = True\r\n self.energizer_flag = True\r\n self.point_counter += 50\r\n self.dot_counter += 1\r\n if self.first_eaten:\r\n self.global_counter += 1\r\n\r\n # When field is 'p' Pac-Man ate a normal point, which adds +10 to his point counter\r\n elif field != None and field[0] == 'p': \r\n self.play_chomp()\r\n eat_dot = True\r\n self.point_counter += 10\r\n self.dot_counter += 1\r\n if self.first_eaten:\r\n self.global_counter += 1\r\n\r\n # If not all ghosts are out of the ghost house the hourglass will be reset\r\n if eat_dot and not all_ghost_out:\r\n self.hourglass_counter = 0", "def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)", "def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == 
y):\n cpt += 1\n return cpt", "def countGreatPeople(self, tCoords):\n\t\tiCount = 0\n\t\tplot = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif plot.isCity():\n\t\t\tcity = plot.getPlotCity()\n\t\t\tiGreatPriest = gc.getInfoTypeForString(\"SPECIALIST_GREAT_PRIEST\")\n\t\t\tfor i in range(iGreatPriest, iGreatPriest+7, 1):\n\t\t\t\tiCount += city.getFreeSpecialistCount(i)\n\t\treturn iCount", "def get_correct_lap_count(self):", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def getHitCount(self): #$NON-NLS-1$\r", "def count(self):\n\n raise NotImplementedError", "def num_points_sweep(self, start, stop, step):\r\n return(abs((stop - start)//step) + 1)", "def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)", "def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }", "def count(x):\n return sum(len(y) for y in x)", "def countPixels(input_img, input_mode, x, y, r):\n\n if input_mode == 'fp':\n np_img = loadImage(input_img)\n elif input_mode == 'np':\n np_img = input_img\n else:\n return (input_mode, \" is not a supported mode. Supported modes are 'np' or 'fp'.\")\n\n base_img = circleMask(np_img, x, y, r, 'exterior')\n\n core_img = circleMask(np_img, x, y, r*0.8, 'exterior')\n core_count = np.count_nonzero(base_img*core_img)\n\n inner_img = circleMask(np_img, x, y, r*0.8, 'exterior')\n inner_ring = base_img - inner_img\n inner_count = np.count_nonzero(inner_ring)\n\n outer_img = circleMask(np_img, x, y, r*1.2, 'exterior')\n outer_ring = outer_img - base_img\n outer_count = np.count_nonzero(outer_ring)\n\n\n\n return (core_count, inner_count, outer_count)", "def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def grid_point_counts(self):\n return [high-low for low, high in self._Limits]", "def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count", "def calculate_points(self):\n points = 0\n for power in self.stats['powers']:\n points += self.stats['powers'][power]\n return points", "def points_earned(self):\n delta_counts = self.alive_counts - self.initial_counts\n points = self.points_table * delta_counts\n points = points.reshape(-1,72) # unravel the 
points for easier sum\n return np.sum(points, axis=1) + super().current_points()", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def sample_count(self):", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def count(self,color):\n count = 0\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n if(self.gameState[x,y]==color):\n count+=1\n return count", "def count_genotypes(genotypeList,StateGenPosData, x, y):\r\n allMos = 0\r\n nonEggs = 0\r\n Adults = 0\r\n for i in range(len(genotypeList)):\r\n gt = genotypeList[i]\r\n b = sum(1 for item in StateGenPosData if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n c = sum(1 for item in StateGenPosData if 'adult' in item[0] and 'XX' in item[1] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y))\r\n d = sum(1 for item in StateGenPosData if 'adult' in item[0] and gt in item[1] and item[2]==(x,y))\r\n## for item in StateGenPosData:\r\n## print(item[0],item[1],item[2])\r\n## if 'adult' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## d+=1\r\n## print('yay')\r\n## if not 'new' in item[0] and not 'egg' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## c+=1\r\n## if not 'new' in item[0] and not 'gestating' in item[0] and gt in item[1] and item[2]==(x,y):\r\n## b+=1\r\n allMos = allMos + b\r\n nonEggs = nonEggs + c\r\n Adults = Adults + d\r\n return allMos, nonEggs, Adults", "def nearPoints(self):\n # initialize the edge counter\n for edge in self.G.edges():\n self.edge_id__count[self.G[edge[0]][edge[1]].get(\"edgecounter\")] = 0\n \n for point in self.gps_points:\n nearest_edge = self.getNearestEdge(point)\n # print str(point.getAttributes().get(\"ID\")) + \"->\" + str(nearest_edge.getAttributes().get('Id'))\n self.addPointCountToEdge(nearest_edge)", "def get_num_hit(boxes_truth, boxes_pred, is_hit):\n out = 0\n for tbox in boxes_truth:\n for pbox in boxes_pred:\n if is_hit(tbox, pbox):\n out += 1\n return out", "def getCounts(training_data, test_row, k):\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n\n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n \n return counts", "def nPoints(self):\n return self._c_param.shrake_rupley_n_points", "def count(self):\n # TODO not implemented yet\n return 0", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def numCoords(self):\n return self.nCoords", "def _count_parties(data_set): #DEMOCRATS, THEN REPUBLICANS\r\n reps = 0\r\n dems = 0\r\n for data_point in data_set:\r\n if data_point.dat_party == \"R\": reps+=1\r\n if data_point.dat_party == \"D\": dems+=1\r\n\r\n return (dems, reps)", "def count_turns(spline):\n func = spline(spline._data[0])\n turns = sum(abs(diff(sign(diff(func))))) / 2\n return turns", "def n_integral_points(self):\n if self.is_empty():\n return tuple()\n 
box_min, box_max = self.bounding_box()\n from sage.geometry.integral_points import rectangular_box_points\n return rectangular_box_points(box_min, box_max, self, count_only=True)", "def twos_points(dice_list):\n return dice_list.count(2) * 2", "def compute_detection_counts(kinds, valid_mask, aoi_mask, scene_counts):\n scene_counts = np.maximum(scene_counts, 1)\n if len(kinds):\n pairs = (kinds == 'pair_trawlers')\n singles = (kinds == 'single_trawler')\n scales = (kinds == 'pair_trawlers') * 2 + (kinds == 'single_trawler')\n aoi_pts = round((scales * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n aoi_pairs = round((pairs * (valid_mask & aoi_mask) / scene_counts).sum(), 1) \n else:\n aoi_pts = aoi_pairs = 0\n return aoi_pts, aoi_pairs", "def npoints(self):\n return _property_op(arctern.ST_NPoints, self)", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)", "def getTotalDistance(self,points):\n return sum([self.getDistance(points[i],points[i+1]) for i in range(len(points)-1)])", "def count_2d_points(self, zmin=None, zmax=None, outfname=None):\n if zmin != None:\n if zmax == None: zmax = zmin + 10.\n ind = (self.z >= zmin)*(self.z <= zmax)\n xin = self.x[ind]; yin = self.y[ind]; zin = self.z[ind]\n self.z2d= (zmin+zmax)/2.\n else:\n xin = self.x.copy(); yin = self.y.copy(); zin = self.z.copy()\n dx = (self.xgrid[1] - self.xgrid[0])/2.\n nArr = np.zeros((self.xgrid.size, self.ygrid.size), np.int32)\n try:\n self.z2d\n print 'Counting 2D points for z =', self.z2d,' nm'\n except:\n print 'Counting all points as 2D'\n self.nArr = _count_2D_points(nArr, self.xgrid, self.ygrid, xin, yin, dx)\n print 'End Counting 2D points.'\n if outfname != None:\n try:\n self.save_2d_points(outfname=outfname)\n except:\n print 'Unable to save grid-counting data!'\n return", "def get_position_count(self):\n return self.positions.count()", "def pointlength(x):\n return 0.0", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)" ]
[ "0.74205756", "0.71399736", "0.71250033", "0.70320505", "0.6990405", "0.69521356", "0.6948015", "0.6936048", "0.68467045", "0.68396676", "0.66850936", "0.66713077", "0.65779823", "0.6508164", "0.64867896", "0.64867896", "0.64867896", "0.64867896", "0.6444945", "0.64027125", "0.6399369", "0.63686", "0.63542753", "0.6317059", "0.6306645", "0.62951565", "0.62770796", "0.62750727", "0.627114", "0.62450135", "0.62450135", "0.62128824", "0.62122756", "0.6206301", "0.62006766", "0.6199137", "0.6187682", "0.61864203", "0.61625475", "0.6154853", "0.6134826", "0.61276627", "0.6123801", "0.6119677", "0.6111616", "0.6111434", "0.61086786", "0.61054265", "0.6097558", "0.6095103", "0.6073318", "0.6038553", "0.6031024", "0.6026329", "0.6025", "0.6013718", "0.60067075", "0.6006679", "0.5999018", "0.59924334", "0.59741205", "0.5967149", "0.5960545", "0.5954514", "0.5931921", "0.5930497", "0.592709", "0.5921065", "0.5916385", "0.59143436", "0.58920175", "0.5888756", "0.5886594", "0.58797276", "0.5879235", "0.58525723", "0.5848396", "0.5842275", "0.584088", "0.58352816", "0.5834581", "0.5833693", "0.5829599", "0.58269805", "0.5824415", "0.5824277", "0.58223534", "0.581861", "0.5817753", "0.5805531", "0.5800032", "0.5793893", "0.5793893", "0.5790765", "0.5790148", "0.5788254", "0.5787712", "0.57872", "0.5763081", "0.5763081" ]
0.7081302
3
This method says who is the winner on top of the board
def drawWinner(result):
    # determines who is the winner from the result
    if result == 1:
        text = "Black player is the winner !"
    elif result == 2:
        text = "White player is the winner !"
    else:
        text = "Tie Game !"
    #draws the text as in a surface
    winner_surf = BASICFONT.render(text, True, BLACK)
    winner_rect = winner_surf.get_rect()
    winner_rect.topleft = ((WINDOWWIDTH - winner_rect.width)/2, 20)
    DISPLAYSURF.blit(winner_surf, winner_rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def winner(board):\n\t#For X\n\tiswinnerX = winnerForPlayer(board, X)\n\tiswinnerO = winnerForPlayer(board, O)\n\n\tif iswinnerX:\n\t\treturn X\n\tif iswinnerO:\n\t\treturn O\n\n\treturn None", "def determine_winner1(self): \r\n sorted_player_rank = self._rank()\r\n print(f\"sorted player rank: {sorted_player_rank}\")\r\n print(f\"winner is player {sorted_player_rank[0]}: with points {sorted_player_rank[0][1]}\")", "def winner(self, board):\n if self.any_legal_move(BLACK, board) or self.any_legal_move(WHITE,board):\n return None\n scoreBlack = self.score(BLACK, board)\n scoreWhite = self.score(WHITE, board)\n if scoreBlack > scoreWhite: return PLAYERS[BLACK]\n elif scoreBlack < scoreWhite: return PLAYERS[WHITE]\n else: return TIE", "def winner(board):\n #To determine the winner, I need to know the board's final value. \n token_value = utility(board)\n #if it's 1, X won. If it's -1, O won. Else, it was a tie.\n if(token_value == 1):\n return 'X'\n elif(token_value == -1):\n return 'O'\n else:\n return None", "def declare_winner(board):\n results = count_chips(board, 0), count_chips(board, 1)\n winner = '¡El color {} ha ganado la partida!'\n for i in range(2):\n if results[i] > results[i - 1]:\n print(winner.format(PLAYER_COLORS[i]) + '\\n')\n if results[0] == results[1]:\n print('¡Empate!\\n')\n print('Puntajes:')\n for i in range(2):\n print('{}: {} punto(s)'.format(PLAYER_COLORS[i].title(), results[i]))", "def winner(board):\n # finite list of possible wins\n winnings = [\n (0, 0), (0, 1), (0, 2), \n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n (0, 0), (1, 0), (2, 0),\n (0, 1), (1, 1), (2, 1),\n (0, 2), (1, 2), (2, 2),\n (0, 0), (1, 1), (2, 2),\n (2, 0), (1, 1), (0, 2)\n ]\n # if the board has one of the lists in winnings \n # then the piece in one of those spots is the winner\n xcount = 0\n ocount = 0\n for i in range(len(winnings)):\n if(board[winnings[i][0]][winnings[i][1]] == X):\n xcount += 1\n if(board[winnings[i][0]][winnings[i][1]] == O):\n ocount += 1\n if((i + 1) % 3 == 0):\n if(ocount == 3 or xcount == 3):\n return board[winnings[i][0]][winnings[i][1]]\n else:\n ocount = 0\n xcount = 0\n return EMPTY", "def winner(self):\n\n\t\tfor player in [1,2]:\n\t\t\twon = np.full((self.boardSize), player)\n\n\t\t\t# Check diagonals\n\t\t\tif(np.array_equal(np.diag(self.board), won)): return player\n\t\t\tif(np.array_equal(np.diag(np.fliplr(self.board)), won)): return player\n\n\t\t\t# Check lines and columns\n\t\t\tfor i in range(self.boardSize):\n\t\t\t\tif(np.array_equal(self.board[i], won)): return player\n\t\t\t\tif(np.array_equal(self.board[:,i], won)): return player\n\n\t\t# Draw\n\t\tif(not(0 in self.board)): return 3\n\n\t\t# No win or draw\n\t\treturn 0", "def winner(board):\n \n possible_wins = []\n row1 = board[0]\n row2 = board[1]\n row3 = board[2]\n col1 = [board[0][0],board[1][0],board[2][0]]\n col2 = [board[0][1],board[1][1],board[2][1]]\n col3 = [board[0][2],board[1][2],board[2][2]]\n diag1 = [board[0][0],board[1][1],board[2][2]]\n diag2 = [board[2][0],board[1][1],board[0][2]]\n \n possible_wins.append(row1)\n possible_wins.append(row2)\n possible_wins.append(row3)\n possible_wins.append(col1)\n possible_wins.append(col2)\n possible_wins.append(col3)\n possible_wins.append(diag1)\n possible_wins.append(diag2)\n \n for trait in possible_wins:\n if trait.count(\"X\") == 3:\n return \"X\"\n elif trait.count(\"O\") == 3:\n return \"O\"\n \n return None", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > 
white_count:\n return (-1, black_count, white_count)\n elif white_count > black_count:\n return (1, black_count, white_count)\n else:\n return (0, black_count, white_count)", "def winner(self):\n\n if self.home_score > self.away_score:\n return HOME\n elif self.home_score < self.away_score:\n return VISITOR\n else:\n return TIE", "def winner(self):\n raise NotImplementedError", "def winner(self):\n raise NotImplementedError", "def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0", "def winner(self):\n # Credit to Dariusz Walczak for inspiration.\n # http://stackoverflow.com/questions/1720421/merge-two-lists-in-python\n moves = [p.possible_moves(p.pieces, self) for p in self.players]\n if False in [mv == [] for mv in moves]:\n return (\"None\")\n else:\n cand = [(p.score, p.name) for p in self.players]\n return (sorted(cand, reverse=True)[0][1])", "def has_winner(self):\r\n\r\n\t\t\"Check for horizonal win\"\r\n\r\n\t\tfor x in range(0, 3):\r\n\r\n\t\t\tif self.game_board[x][0] == self.game_board[x][1] and self.game_board[x][1] == self.game_board[x][2]:\r\n\r\n\t\t\t\treturn self.game_board[x][0]\r\n\r\n\t\t\"Check for vertical win\"\r\n\r\n\t\tfor y in range(0, 3):\r\n\r\n\t\t\tif self.game_board[0][y] == self.game_board[1][y] and self.game_board[1][y] == self.game_board[2][y]:\r\n\r\n\t\t\t\treturn self.game_board[0][y]\r\n\r\n\t\t\"Check for diagonal from left to right\"\r\n\t\r\n\t\tif self.game_board[0][0] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][2]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.game_board[0][2] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][0]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.count == 8:\r\n\r\n\t\t\treturn \"Tie\"\r\n\r\n\t\telse:\r\n\r\n\t\t\treturn \"0\"\r\n\r\n\r\n\t\tpass", "def winner(self):\n if (self.player):\n return (0 == reduce(lambda x, y: x+y, self.board.p1vec))\n else:\n return (0 == reduce(lambda x, y: x+y, self.board.p2vec))", "def winner(board):\r\n\r\n #rows:\r\n if (board[0][0] == board[0][1] == board[0][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[1][0] == board[1][1] == board[1][2]) and (board[1][0] == \"X\" or board[1][0] == \"O\"):\r\n return board[1][0]\r\n if (board[2][0] == board[2][1] == board[2][2]) and (board[2][0] == \"X\" or board[2][0] == \"O\"):\r\n return board[2][0]\r\n\r\n #columns\r\n if (board[0][0] == board[1][0] == board[2][0]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][1] == board[1][1] == board[2][1]) and (board[0][1] == \"X\" or board[0][1] == \"O\"):\r\n return board[0][1]\r\n if (board[0][2] == board[1][2] == board[2][2]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n\r\n #diagonals\r\n if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n \r\n return None\r\n\r\n raise NotImplementedError", "def winner(board):\n # Hard code winning moves\n # row0\n if board[0][0] == board[0][1] == board[0][2] == X:\n return X\n elif board[0][0] == board[0][1] == board[0][2] == O:\n return O\n # row1\n elif 
board[1][0] == board[1][1] == board[1][2] == X:\n return X\n elif board[1][0] == board[1][1] == board[1][2] == O:\n return O\n # row2\n elif board[2][0] == board[2][1] == board[2][2] == X:\n return X\n elif board[2][0] == board[2][1] == board[2][2] == O:\n return O\n # col0\n elif board[0][0] == board[1][0] == board[2][0] == X:\n return X\n elif board[0][0] == board[1][0] == board[2][0] == O:\n return O\n # col1\n elif board[0][1] == board[1][1] == board[2][1] == X:\n return X\n elif board[0][1] == board[1][1] == board[2][1] == O:\n return O\n # col2\n elif board[0][2] == board[1][2] == board[2][2] == X:\n return X\n elif board[0][2] == board[1][2] == board[2][2] == O:\n return O\n # diagonal\n elif board[0][0] == board[1][1] == board[2][2] == X:\n return X\n elif board[0][0] == board[1][1] == board[2][2] == O:\n return O\n # inverse diagonal\n elif board[0][2] == board[1][1] == board[2][0] == X:\n return X\n elif board[0][2] == board[1][1] == board[2][0] == O:\n return O\n\n return None", "def winner(self, board):\n # Cek baris\n if all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n \n # Cek kolom\n elif board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n \n # Cek diagonal\n elif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board[0][2]\n else:\n return None", "def determine_winner(self):\r\n for i in range(2):\r\n # Way of the Stone (capture opponent master)\r\n if not self.bitboard_king[i]:\r\n return 1 - i * 2\r\n # Way of the Stream (move master to opposite square)\r\n if self.bitboard_king[i] == self.WIN_BITMASK[i]:\r\n return i * 2 - 1\r\n return 0", "def calc_winner(self):\n pass", "def _set_winner(self):\n b_count = 0\n w_count = 0\n\n for i_row in range(self._num_rows):\n for i_col in range(self._num_cols):\n if self._board[i_row][i_col] == \"B\":\n b_count += 1\n elif self._board[i_row][i_col] == \"W\":\n w_count += 1\n\n if b_count == w_count:\n self._winner = \"NONE\"\n elif self._how_to_win == \">\":\n self._winner = \"B\" if b_count > w_count else \"W\"\n elif self._how_to_win == \"<\":\n self._winner = \"B\" if b_count < w_count else \"W\"", "def winner(board):\n black_count = board.count(-1)\n white_count = board.count(1)\n if black_count > white_count:\n #if black_count + white_count != 64:\n # black_count += (64 - black_count - white_count)\n return (-1, black_count, white_count)\n elif white_count > black_count:\n #if black_count + white_count != 64:\n # white_count += (64 - black_count - white_count)\n return (1, black_count, white_count)\n else:\n return (0, black_count, white_count)", "def winner(board):\n for row in range(len(board)):\n if board[row].count(X) == 3:\n return X\n elif board[row].count(O) == 3:\n return O\n for column in range(len(board[row])):\n if board[0][column] == X and board[1][column] == X and board[2][column] == X:\n return X\n elif board[0][column] == O and board[1][column] == O and board[2][column] == O:\n return O\n\n if (board[0][0] == X and board[1][1] ==X and board[2][2] ==X) or (board[0][2] == X and board[1][1]==X and board[2][0] ==X):\n return X\n\n elif (board[0][0] == O and 
board[1][1] == O and board[2][2] == O) or (board[0][2] == O and board[1][1]== O and board[2][0] ==O):\n return O\n\n else: return None\n\n\n\n # raise NotImplementedError", "def winner(self):\n return (\"None\")", "def check_winner(self):\n if DotsAndBoxesState.score1 > 4: # Because the total score is fixed at nine, if player's score is greater than four,\n # then the player is the winner.\n return \"A\"\n else:\n return \"B\"", "def getWinner(self):\n global vertical_win\n global done\n lines = (\n self.board, # columns\n zip(*self.board), # rows\n diagonalsPos(self.board, self.cols, self.rows), # positive diagonals\n diagonalsNeg(self.board, self.cols, self.rows) # negative diagonals\n )\n\n for sublist in self.board:\n if sublist[0] == sublist[1] == sublist[2] == sublist[3] or sublist[1] == sublist[2] == sublist[3] == \\\n sublist[4] or sublist[2] == sublist[3] == sublist[4] == sublist[5]:\n vertical_win = True\n\n for line in chain(*lines):\n for color, group in groupby(line):\n if color != 0 and len(list(group)) >= self.win:\n done = True\n return color\n counter = 0\n for sublist in self.board:\n for i in sublist:\n if i != 0:\n counter += 1\n if counter == 42:\n done = True\n return Tie", "def winner(board):\n\n # Check for horizontal wins\n for row in board:\n if row[0] == row[1] == row[2] and row[0] is not None:\n return row[0]\n\n # Check for vertical wins\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] and board[0][i] is not None:\n return board[0][i]\n\n # Check for diagonal wins\n if board[0][0] == board[1][1] == board[2][2] and board[0][0] is not None:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0] and board[0][2] is not None:\n return board[0][2]\n\n # If there is no winner, return None\n return None", "def winner(self):\n if self.board.winner() is None:\n return None\n return self.token_lookup[self.board.winner()]", "def winner(board):\n for i in range(3):\n if board[i][0] == board[i][1] == board[i][2] != None:\n return board[i][0]\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] != None:\n return board[0][i]\n if board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]\n return None", "def winner(board):\n if board[0][0] != EMPTY and (board[0][0] == board[0][1] == board[0][2] \n or board[0][0] == board[1][1] == board[2][2] \n or board[0][0] == board[1][0] == board[2][0]):\n return board[0][0]\n\n elif board[1][1] != EMPTY and (board[1][0] == board[1][1] == board[1][2]\n or board[0][1] == board[1][1] == board[2][1]):\n return board[1][1]\n \n elif board[2][2] != EMPTY and (board[0][2] == board[1][2] == board[2][2]\n or board[2][0] == board[2][1] == board[2][2]):\n return board[2][2]\n \n elif board[2][0] != EMPTY and (board[2][0] == board[1][1] == board[0][2]):\n return board[2][0]\n \n else:\n None", "def winner(board):\n d_board = np.array(board)\n board_list = d_board.tolist()\n if board[0][0] == \"O\" and board[1][1] == \"O\" and board[2][2] == \"O\":\n return O\n elif board[0][0] == \"X\" and board[1][1] == \"X\" and board[2][2] == \"X\":\n return X\n elif board[0][2] == \"O\" and board[1][1] == \"O\" and board[2][0] == \"O\":\n return O\n elif board[0][2] == \"X\" and board[1][1] == \"X\" and board[2][0] == \"X\":\n return X\n else:\n for row in board_list:\n if row.count(\"X\") == 3:\n return X\n elif row.count(\"O\") == 3:\n return O\n board_list = d_board.transpose().tolist()\n for row in board_list:\n if row.count(\"X\") == 3:\n 
return X\n elif row.count(\"O\") == 3:\n return O\n return None\n\n #raise NotImplementedError", "def determineWinner(self):\n if self.game_state.numActive() == 1:\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name]:\n print \"\"\n print player.name + \" wins with\"\n for card in self.player_hand_dict[player.name]:\n print card\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[player.name] += self.game_state.pot\n return\n\n for player in self.game_state.player_list:\n for card in self.game_state.board:\n self.player_hand_dict[player.name].append(Card(card.suit, card.rank))\n hand_ranking = HandRanking(self.game_state.player_list, self.player_hand_dict)\n hand_ranking.rankHands()\n winning_rank = -1\n winner = None\n tie_list = []\n \"\"\" Get winning rank, only consider active players for the pot \"\"\"\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name] == True:\n if DEBUG:\n print \"Considering \" + str(player.name) + \"'s hand for the pot.\"\n if hand_ranking.player_ranks_dict[player.name] > winning_rank:\n winning_rank = hand_ranking.player_ranks_dict[player.name]\n winner = player \n tie_list = []\n tie_list.append(player)\n elif hand_ranking.player_ranks_dict[player.name] == winning_rank:\n tie_list.append(player)\n \"\"\" winner should never be equal to None \"\"\"\n\n \"\"\" Check for tie and resolve if needed \"\"\"\n if len(tie_list) > 1:\n if DEBUG:\n print \"found potential tie...\"\n for player in tie_list:\n print player.name + \"'s hand:\"\n for card in hand_ranking.player_best_hand_dict[player.name]:\n print card\n print \"resolving tie...\"\n result_tie_list = self.resolveTie(hand_ranking, tie_list)\n print \"\"\n self.printPlayersHands()\n for player in result_tie_list:\n print player.name + \",\",\n print \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot / len(tie_list)) + \" chips!\"\n for player in result_tie_list:\n self.game_state.player_chips[player.name] += self.game_state.pot / len(tie_list)\n else:\n print \"\"\n self.printPlayersHands()\n print winner.name + \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[winner.name] += self.game_state.pot", "def getWinner (self):\r\n\t\tlines = (\r\n\t\t\tself.board, # columns\r\n\t\t\tzip(*self.board), # rows\r\n\t\t\tBoard.diagonalsPos(self.board, self.cols, self.rows),\r\n\t\t\tBoard.diagonalsNeg(self.board, self.cols, self.rows) \r\n\t\t)\r\n\r\n\t\tfor line in chain(*lines):\r\n\t\t\tfor figure, group in groupby(line):\r\n\t\t\t\tif figure != NONE and len(list(group)) >= self.win:\r\n\t\t\t\t\treturn figure", "def get_winner(self):\n winner: Player = Player('none')\n points_winner = 0\n for player in self.players:\n for key, value in player.get_stats().items():\n print('{}: {}'.format(key, value))\n if key == 'points':\n if value >= points_winner:\n winner = player\n print()\n\n print('The winner is: ' + winner.get_name())\n return winner", "def board_status(board):\n if terminal(board):\n victor = winner(board)\n if victor is not None:\n emit(\"game_over\", \"winner: \" + victor)\n else:\n emit(\"game_over\", \"Draw\")", "def utility(board):\n winners = winner(board)\n if (X == winners):\n return 1\n elif (O == winners):\n return -1\n return 0", "def _accounce_winner(self):\n\n winner = sorted(((player.get_name(), player.get_last_roll(), 
player.get_total_score())\n for player in self._players.get_players()),\n key=lambda player: (player[1]),\n reverse=True)[0]\n\n print(\"\\n\\nCongratulations {}, you rolled a {} and your total score is {}. You won the game!\"\n .format(winner[0], winner[1], winner[2]))", "def winner(board):\n \n for m in [\"XXX\", \"OOO\"]:\n # horizontal\n for row in range(3):\n if board[row][0] == board[row][1] == board[row][2]:\n return board[row][0]\n # vertical\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n # diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n return board[1][1]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[1][1]\n return None", "def winner(board):\n x_in_board = []\n o_in_board = []\n winning_positions = [\n [[0, 0], [0, 1], [0, 2]],\n [[1, 0], [1, 1], [1, 2]],\n [[2, 0], [2, 1], [2, 2]],\n [[0, 0], [1, 0], [2, 0]],\n [[0, 1], [1, 1], [2, 1]],\n [[0, 2], [1, 2], [2, 2]],\n [[0, 0], [1, 1], [2, 2]],\n [[0, 2], [1, 1], [2, 0]]\n ]\n\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == X:\n x_in_board.append([i, j])\n elif board[i][j] == O:\n o_in_board.append([i, j])\n\n for i in winning_positions:\n if i[0] in x_in_board and i[1] in x_in_board and i[2] in x_in_board:\n return X\n elif i[0] in o_in_board and i[1] in o_in_board and i[2] in o_in_board:\n return O\n\n return None", "def who_won(self):\n if self.scoreB >= WP: return Stone.BLACK\n if self.scoreW >= WP: return Stone.WHITE\n return self.last", "def winner(board):\n # Horizontal win check\n\n for i in board:\n if i[0] == i[1] and i[0] == i[2] and i[0] != EMPTY:\n if i[0] == X:\n return X\n else:\n return O\n \n # Vertical win check\n \n if board[0][0] == board[1][0] and board[0][0] == board[2][0]:\n if board[0][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n if board[0][1] == board[1][1] and board[0][1] == board[2][1]:\n if board[0][1] != EMPTY:\n if board[0][1] == X:\n return X\n else:\n return O\n if board[0][2] == board[1][2] and board[0][2] == board[2][2]:\n if board[0][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n # Diagonal win check\n\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n if board[0][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n if board[0][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n return None", "def determine_winner ( self ):\n\n if self.columnWin() != None:\n return self.columnWin()\n\n elif self.diagonalWin() != None:\n return self.diagonalWin()\n\n elif self.rowWin() != None:\n return self.rowWin()\n\n else:\n return None", "def winner(board):\n if board[0][0] == board[0][1] == board[0][2] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n elif board[1][0] == board[1][1] == board[1][2] != EMPTY:\n if board[1][0] == X:\n return X\n else:\n return O\n\n elif board[2][0] == board[2][1] == board[2][2] != EMPTY:\n if board[2][0] == X:\n return X\n else:\n return O\n\n elif board[0][0] == board[1][0] == board[2][0] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n elif board[0][1] == board[1][1] == board[2][1] != EMPTY:\n if board[0][1] == X:\n return X\n else:\n return O\n\n elif board[0][2] == board[1][2] == board[2][2] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n\n elif board[0][0] == board[1][1] == board[2][2] != EMPTY:\n if board[0][0] == X:\n return X\n else:\n return O\n\n 
elif board[0][2] == board[1][1] == board[2][0] != EMPTY:\n if board[0][2] == X:\n return X\n else:\n return O\n else:\n return None\n\n #raise NotImplementedError", "def winner(board):\n columns = []\n for row in board:\n xcount = row.count(X)\n ocount = row.count(O)\n if xcount == 3:\n return X\n if ocount == 3:\n return O\n\n for j in range(len(board)):\n column = [row[j] for row in board]\n columns.append(column)\n \n for j in columns:\n xcounter = j.count(X)\n ocounter = j.count(O)\n if xcounter == 3:\n return X\n if ocounter == 3:\n return O\n \n if board[0][0] == O and board[1][1] == O and board[2][2] == O:\n return O\n if board[0][0] == X and board[1][1] == X and board[2][2] == X:\n return X\n if board[0][2] == O and board[1][1] == O and board[2][0] == O:\n return O\n if board[0][2] == X and board[1][1] == X and board[2][0] == X:\n return X\n\n return None", "def winner(board):\n for i in (O, X):\n for j in range(3):\n if (board[j][0] == i and board[j][1] == i and board[j][2] == i):\n return i\n if (board[0][j] == i and board[1][j] == i and board[2][j] == i):\n return i\n if (board[0][0] == i and board[1][1] == i and board[2][2] == i):\n return i\n if (board[2][0] == i and board[1][1] == i and board[0][2] == i):\n return i\n return None", "def utility(board):\n win = winner(board)\n if win == X: return 1\n elif win == O: return - 1\n else: return 0", "def test_winner_won(self):\n board = self.board_class()\n board.place_token(1, 1, 'X')\n board.place_token(0, 0, 'O')\n board.place_token(1, 0, 'X')\n board.place_token(0, 2, 'O')\n board.place_token(1, 2, 'X')\n assert board.calc_winner() == 'X'", "def winner(board):\n for i in range(3):\n firstnumber=board[i][0]\n if firstnumber!=EMPTY:\n secondnumber=board[i][1]\n if secondnumber==firstnumber:\n if board[i][2]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n for i in range(3):\n firstnumber=board[0][i]\n if firstnumber!=EMPTY:\n secondnumber=board[1][i]\n if secondnumber==firstnumber:\n if board[2][i]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n firstnumber=board[0][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[2][2]==firstnumber:\n return firstnumber\n firstnumber=board[2][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[0][2]==firstnumber:\n return firstnumber\n return None\n raise NotImplementedError", "def winFor(self,player):\n if(self.cachedWin == False):\n won = False;\n if(player==WHITE):\n for x in range(0,WIDTH):\n if(self.gameState[x,0]==WHITE):\n won = True\n \n elif(player==BLACK):\n for x in range(0,WIDTH):\n if(self.gameState[x,HEIGHT-1]==BLACK):\n won = True\n \n if(len(self.successors()) == 0):#IF there are no available moves for both players\n bCount = self.count(BLACK) #check who has the most pawns\n wCount = self.count(BLACK)\n if(bCount>wCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n if(wCount>bCount):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n \n if(won):\n self.cachedWin = True\n self.cachedWinner = player\n return True\n else:\n return False\n else:\n return player == self.cachedWinner", "def get_winner(board):\n\n def who_won(in_a_row, board_size, cur_player):\n \"\"\" \n a function private to get_winner() (yes you can do this. Cool huh!?) 
\n that tells get_winner if it has a winner \n \"\"\"\n if in_a_row == board_size:\n return 1 if cur_player == 'X' else 2\n else:\n return 0\n\n def test_row_col(board, rows):\n \"\"\" private function to test the rows and columns \"\"\"\n for i in range(len(board)):\n cur_player = board[i][0] if rows else board[0][i]\n in_a_row = 0\n for j in range(len(board)):\n symbol = board[i][j] if rows else board[j][i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1\n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n def test_diagonal(board, normal):\n \"\"\" private function to test the two diagonals \"\"\"\n cur_player = board[0][0] if normal else board[0][len(board)-1]\n in_a_row = 0\n for i in range(len(board)):\n symbol = board[i][i] if normal else board[i][len(board)-1-i]\n if (not symbol == '-') and (symbol == cur_player):\n in_a_row += 1 \n else:\n break\n winner = who_won(in_a_row, len(board), cur_player)\n if not winner == 0:\n return winner\n return 0\n\n\n # test rows\n winner = test_row_col(board, True)\n if not winner == 0:\n return winner\n\n # test cols\n winner = test_row_col(board, False)\n if not winner == 0:\n return winner\n\n # test diagonal from top left to bottom right\n winner = test_diagonal(board, True)\n if not winner == 0:\n return winner\n\n # test diagonal from top right to bottom left\n winner = test_diagonal(board, False)\n if not winner == 0:\n return winner\n\n return 0", "def board_label(board):\n # 0 draw for side-to-move, 1 win for side-to-move (more than 50 moves), 2 win for side-to-move\n # -1 loss in more than 50, -2 loss in <50\n with chess.syzygy.open_tablebase(\"./\") as tablebase:\n # board = chess.Board(\"8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1\")\n wdl = tablebase.probe_wdl(board)\n\n # 0 draw, x win in x, -x loss in x\n # counts may be off by 1\n with chess.syzygy.open_tablebase(\"./\") as tablebase:\n # board = chess.Board(\"8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1\")\n dtz = tablebase.probe_dtz(board)\n if wdl == 0:\n win = 0\n draw = 1\n loss = 0\n elif wdl > 0:\n win = 1\n draw = 0\n loss = 0\n else:\n win = 0\n draw = 0\n loss = 1\n if dtz > 0:\n quality = 2000 - dtz\n elif dtz < 0:\n quality = -2000 - dtz\n else:\n quality = 0\n\n return win, draw, loss, quality", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == 
new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def won(self):\n for y in range(self.size):\n winning = []\n for x in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n for x in range(self.size):\n winning = []\n for y in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = self.size-1-y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n return None", "def winner(self):\n state = self.state\n if state == State.X_WON:\n return Square.X\n if state == State.O_WON:\n return Square.O\n return None", "def getWinner(board):\n players = [X, O]\n num_symbols_in_line = 3\n for player in players:\n # check rows\n for row in board:\n line_count = row.count(player)\n if line_count == num_symbols_in_line:\n return player\n \n # check columns\n for col_i in range(len(board[0])):\n line_count = 0\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top left to bottom right\n line_count = 0\n for vert_cell in range(len(board)):\n if board[vert_cell][vert_cell] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return 
player\n \n # check vertical from top right to bottom left\n line_count = 0\n col_i = len(board) - 1\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n col_i -= 1\n if line_count == num_symbols_in_line:\n return player\n\n return None", "def utility(board):\n status = winner(board)\n if status == X:\n return 1\n elif status == O:\n return -1\n else:\n return 0", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def get_winner(self):\n return self.winner", "def test_get_winner():\n\n board = Board()\n board1 = Board()\n board2 = Board()\n\n # board is empty\n board.find_winner(0)\n assert board.get_winner() == board.EMPTY\n\n # vertical win\n for i in range(4):\n board.move(Board.P1, 1)\n\n for i in range(3):\n board.move(Board.P2, 2)\n board.find_winner(1)\n assert board.get_winner() == board.P1\n\n \"\"\"\n Board looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|O|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|O|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # horizontal win\n for i in range(4):\n board1.move(Board.P2, i)\n for i in range(3):\n board1.move(Board.P1, 1)\n board1.find_winner(2)\n assert board1.get_winner() == board.P2\n\n \"\"\"\n Board1 looks like:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|X|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|X|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|X|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|O|O|O|O|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"\n\n # diagonal win\n\n board2.move(Board.P1, 1)\n board2.move(Board.P2, 2)\n\n board2.move(Board.P1, 2)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 4)\n board2.move(Board.P2, 3)\n\n board2.move(Board.P1, 3)\n board2.move(Board.P2, 5)\n\n board2.move(Board.P1, 4)\n board2.move(Board.P2, 4)\n\n board2.move(Board.P1, 4)\n\n board2.find_winner(1)\n assert board2.get_winner() == board.P1\n\n \"\"\"\n Board 2 looks like\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|X|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|-|X|O|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|X|O|X|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|X|O|O|X|O|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \"\"\"", "def get_winner(state):\n\n if", "def check_for_winner(self):\n\n winner = self.__winner_in_row__()\n winner = self.__winner_in_column__()\n winner = self.__winner_in_diag__()\n\n if winner is None:\n winner = self.__boardFull__()\n \n return winner", "def winner(self):\n state = self._state['visible']\n if state['reserve'][0] < 1:\n return 1\n elif state['reserve'][1] < 1:\n return 0\n return -1", 
"def winner(board):\r\n A = board[0]\r\n B = board[1]\r\n C = board[2]\r\n\r\n if A.count(\"X\") == 3 or B.count(\"X\") == 3 or C.count(\"X\") == 3:\r\n return X\r\n elif A.count(\"O\") == 3 or B.count(\"O\") == 3 or C.count(\"O\") == 3:\r\n return O\r\n elif A[0] == B[0] and A[0] == C[0]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[1] == B[1] and A[1] == C[1]:\r\n if A[1] == X:\r\n return X\r\n elif A[1] == O:\r\n return O\r\n elif A[2] == B[2] and A[2] == C[2]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n elif A[0] == B[1] and A[0] == C[2]:\r\n if A[0] == X:\r\n return X\r\n elif A[0] == O:\r\n return O\r\n elif A[2] == B[1] and A[2] == C[0]:\r\n if A[2] == X:\r\n return X\r\n elif A[2] == O:\r\n return O\r\n else:\r\n return None", "def winner(board):\n\n # Check none empty horizontals\n for i in range(3):\n if board[i][0] and board[i][0] == board[i][1] == board[i][2]:\n return board[i][0]\n\n # Check none empty verticals\n for j in range(3):\n if board[0][j] and board[0][j] == board[1][j] == board[2][j]:\n return board[0][j]\n\n # Check none empty L-R diagonal\n if board[0][0] and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n\n # Check none empty R-L diagonal\n if board[0][2] and board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]", "def winner(board):\n for i in range(len(board)):\n\n # Check rows\n if board[i][0] == board[i][1] == board[i][2] and not board[i][1] == EMPTY:\n return board[i][1]\n\n # Check columns\n elif board[0][i] == board[1][i] == board[2][i] and not board[1][i] == EMPTY:\n return board[1][i]\n\n # Check diagonals\n if board[0][0] == board[1][1] == board[2][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n if board[2][0] == board[1][1] == board[0][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n # No winner if get to this point\n return None", "def utility(board):\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def check_winner(self):\n pass", "def check_for_winner(self):\r\n\r\n # Iterate through the rows\r\n for row in range(self.height):\r\n if self.board[row][0] == self.board[row][1] == self.board[row][2] and self.board[row][0] != None:\r\n return Board.WIN if self.board[row][0] else Board.LOSS\r\n\r\n # Iterate through the columns\r\n for col in range(self.width):\r\n if self.board[0][col] == self.board[1][col] == self.board[2][col] and self.board[0][col] != None:\r\n return Board.WIN if self.board[0][col] else Board.LOSS\r\n\r\n # Diagonals\r\n if self.board[0][0] == self.board[1][1] == self.board[2][2] and self.board[0][0] != None:\r\n return Board.WIN if self.board[0][0] else Board.LOSS\r\n if self.board[0][2] == self.board[1][1] == self.board[2][0] and self.board[0][2] != None:\r\n return Board.WIN if self.board[0][2] else Board.LOSS\r\n\r\n # No winner yet\r\n return 0", "def winner(self, black, white):\n fill(255)\n rect(150, 150, 150, 80, 7)\n fill(0)\n textSize(20)\n if self.tie:\n text(\"It's Tie\", 160, 180)\n elif self.black_wins:\n text(\"Black WINS\", 160, 180)\n elif self.white_wins:\n text(\"White WINS\", 160, 180)\n result = \"black: \" + str(black)\n text(result, 160, 200)\n result = \"white: \" + str(white)\n text(result, 160, 220)", "def winner(self):\n if self.__current_player == 1:\n if self.__fields[0].winner():\n print(self.__players[0]._Player__name + \"is winner!\")\n Game.play = False\n elif self.__current_player == 2:\n if self.__fields[1].winner():\n 
print(self.__players[1]._Player__name + \"is winner!\")\n Game.play = False", "def checkWinner(self):\n\n # Check horizontal and vertical lines.\n for j in range(3):\n if self.board[0][j] != \" \" and self.board[0][j] == self.board[1][j] == self.board[2][j]:\n return self.board[0][j]\n if self.board[j][0] != \" \" and self.board[j][0] == self.board[j][1] == self.board[j][2]:\n return self.board[j][0]\n\n # Check diagonals.\n if self.board[0][0] != \" \" and self.board[0][0] == self.board[1][1] == self.board[2][2]:\n return self.board[0][0]\n elif self.board[0][2] != \" \" and self.board[0][2] == self.board[1][1] == self.board[2][0]:\n return self.board[0][2]\n\n # Check if there is a tie.\n if self.empty_cells == 0:\n return \"Tie\"\n\n # No winner yet.\n return None", "def winner(board):\n # check columns\n for j in range(3):\n if board[1][j] == board[0][j] and board[0][j] == board[2][j] and board[1][j] != EMPTY:\n return board[1][j]\n # check rows\n for i in range(3):\n if board[i][0] == board[i][1] and board[i][1] == board[i][2] and board[i][0] != EMPTY:\n return board[i][0]\n # check diagnols\n if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[0][0] != EMPTY:\n return board[1][1]\n if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[0][2] != EMPTY:\n return board[1][1]\n return None", "def winner(board):\n # check rows\n for row in range(len(board)):\n if board[row][0] == board[row][1] == board[row][2]:\n if board[row][0] == X:\n return X\n elif board[row][0] == O:\n return O\n else:\n return None\n \n # check cols\n for col in range(len(board[0])):\n if board[0][col] == board[1][col] == board[2][col]:\n if board[0][col] == X:\n return X\n elif board[0][col] == O:\n return O\n else:\n return None\n\n # Check diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n if board[0][0] == X:\n return X\n elif board[0][0] == O:\n return O\n else:\n return None\n\n # Check diagonal\n if board[0][2] == board[1][1] == board[2][0]:\n if board[0][2] == X:\n return X\n elif board[0][2] == O:\n return O\n else:\n return None\n\n # else\n return None", "def winner(board) -> any:\n numeric_board = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\n total_horizon = [0, 0, 0]\n total_vertical = [0, 0, 0]\n total_diagonally = [0, 0]\n for i, row in enumerate(board):\n for j, column in enumerate(row):\n if column == X:\n numeric_board[i][j] = 1\n total_horizon[i] += 1\n total_vertical[j] += 1\n elif column == O:\n numeric_board[i][j] = -1\n total_horizon[i] += -1\n total_vertical[j] += -1\n\n n = len(numeric_board)\n total_diagonally[0] = sum(numeric_board[i][i] for i in range(n))\n total_diagonally[1] = sum(numeric_board[i][n - i - 1] for i in range(n))\n\n if 3 in total_horizon or 3 in total_vertical or 3 in total_diagonally:\n return X\n elif -3 in total_horizon or -3 in total_vertical or -3 in total_diagonally:\n return O\n else:\n return None", "def win(board):\n # COLUMNS\n if board[7] == board[4] == board[1] != ' ': # first column down\n return board[7]\n elif board[8] == board[5] == board[2] != ' ': # second column down\n return board[8]\n elif board[9] == board[6] == board[3] != ' ': # third column down\n return board[9]\n\n # ROWS\n if board[7] == board[8] == board[9] != ' ': # first row across\n return board[7]\n elif board[4] == board[5] == board[6] != ' ': # second row across\n return board[4]\n elif board[1] == board[2] == board[3] != ' ': # third row across\n return board[1]\n\n # DIAGONALS\n if board[7] == board[5] == board[3] != ' ': # diagonal staring on 
left\n return board[7]\n elif board[9] == board[5] == board[1] != ' ': # diagonal starting on right\n return board[9]\n\n # else no winner\n return \"No\"", "def utility(board):\n game_winner = \"\"\n #I will analyze every row first\n for i in range(0,3):\n #I check vertically and horizontally if the tokens are the same, meaning any of the two players has 3 in a row.\n if (board[i][0] == board[i][1] and board[i][0] == board[i][2] and (board[i][0] is not EMPTY)):\n #if I find a match vertically, I determine there was a winner and break the for cycle.\n game_winner = board[i][0]\n break\n elif (board[0][i] == board[1][i] and board[0][i] == board[2][i] and (board[0][i] is not EMPTY)):\n #if there is a match horizontally, I determine there was a winner and break the for cycle.\n game_winner = board[0][i]\n break\n #checking diagonals in case there were no winners neither vertically nor horizontally.\n if ((board[0][0] == board[1][1] and board[2][2] == board[0][0]) or (board[0][2] == board[1][1] and board[2][0] == board[0][2])) and (board[1][1] is not EMPTY):\n game_winner = board[1][1]\n #depending on my winning token, I will determine the value I should print. \n if game_winner == \"X\":\n return 1\n elif game_winner == \"O\":\n return -1\n #Since we are assuming we will only receive terminal boards, if no winner was found, we have a tie and should return 0.\n else:\n return 0", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None", "def utility(board):\n\n # Determine winner\n victor = winner(board)\n\n # Assign proper values accordingly\n if victor == X:\n return 1\n elif victor == O:\n return -1\n else:\n return 0", "def pick_winner(self):\r\n self.convert_face_cards_to_numbers()\r\n main_suit = self.cards_int[0][0] # suit that dominates this round\r\n winner_index = 0 # by default 1st player wins until we find a bigger card in same suit\r\n winner_card_value = self.cards_int[0][1]\r\n for index, card in enumerate(self.cards_int[1:]):\r\n if main_suit == card[0]:\r\n if winner_card_value < card[1]:\r\n winner_index = index+1\r\n winner_card_value = card[1]\r\n\r\n return winner_index", "def utility(board):\n win = winner(board)\n if win == \"X\":\n return 1\n elif win == \"O\":\n return -1\n else:\n return 0", "def get_winner(self, board):\r\n for p_id in self.player_ids:\r\n win_array = np.array([p_id] * self.board_width, dtype=np.int8)\r\n for i in range(self.board_width):\r\n # check rows\r\n if np.array_equal(board[i], win_array):\r\n return p_id\r\n # check columns\r\n elif np.array_equal(board[:, i], win_array):\r\n return p_id\r\n # check leading diagonal\r\n elif np.array_equal(np.diagonal(board), win_array):\r\n return p_id\r\n # check non-leading diagonal\r\n elif np.array_equal(np.diagonal(np.flipud(board)), win_array):\r\n return p_id\r\n # return nan if no wins losses or draws\r\n for i in np.nditer(board):\r\n if i == 0:\r\n return np.nan\r\n # must be a draw so return 0\r\n return 0", "def utility(board):\n final = winner(board)\n if final == X:\n return 1\n elif final == O:\n return -1\n else:\n return 0", "def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != 
-1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)", "def check_over(self):\n if self.board.has_winner() == 1:\n return 1\n elif self.board.has_winner() == 2:\n return 2\n elif self.board.check_cats_game():\n return 0\n else:\n return -1", "def get_winner(self) -> chr:\n return self._winner", "def next_player(self):\n # Counter is a useful class that counts objects.\n count = Counter(self.board)\n if count.get('X', 0) > count.get('O', 0):\n return 'O'\n return 'X'", "def check_winner(board):\n winner = get_winner(board)\n if winner:\n print(f\"Game Over, You Win\") if winner == \"X\" else print(\"Game Over, You Loose\") # noqa\n return winner", "def counter_opponent_win(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_opponent().get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways for the opponent to win\n possible_wins = board.get_wins(affinity)\n winning_blocks = board.get_winning_blocks(affinity)\n best_move = None\n\n # sort the best win to counter \n for win in possible_wins:\n if best_move is None: best_move = win\n elif win[0] <= best_move[0]: \n if win[1] >= best_move[1]:\n best_move = win\n if best_move is not None: possible_wins.remove(best_move)\n return best_move", "def test_winner(state_board):\n\tres = 3 #default value is tie game\n\tptsb = 0 #points for the black\n\tptsw = 0 #points for the white\n\t\n\t#looks in the board if there is an empty case while\n\t# counting the number of points for each player\n\tfor i in state_board:\n\t\tfor j in i:\n\t\t\tif j == 0:\n\t\t\t\tres = 0\n\t\t\telif j == 1:\n\t\t\t\tptsb += 1\n\t\t\telif j == 2:\n\t\t\t\tptsw += 1\n\t\n\t#if there is an empty case, looks for possibilities\n\t# for the other player, if no possibility test for the points\n\t#if no empty case\n\t# test for points\n\t#else return 0\n\tif res == 0:\n\t\tif possible(state_board,1) == []:\n\t\t\tif possible(state_board,2) == []:\n\t\t\t\tres = count_points(ptsb,ptsw)\n\t\t\telse:\n\t\t\t\tres = 5\n\t\telif possible(state_board,2) == []:\n\t\t\tres = 4\n\telse:\n\t\tres = count_points(ptsb,ptsw)\n\treturn res", "def has_winner(self):\n\n if self.num_black_pieces == 0 or len(self.get_all_valid_moves(Player.black)) == 0:\n return Player.white\n elif self.num_white_pieces == 0 or len(self.get_all_valid_moves(Player.white)) == 0:\n return Player.black\n elif self.repetition_happened() or self.passive_game():\n return \"Tie\"\n else:\n return None", "def utility(board):\n won = winner(board)\n\n if won == X:\n return 1\n elif won == O:\n return -1\n else:\n return 0", "def get_winner(self):\n combos = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6],\n ]\n winner = None\n for combo in combos:\n a, b, c = combo\n if (\n self.board[a] is not None\n and self.board[a] == self.board[b]\n and self.board[a] == self.board[c]\n ):\n winner = self.board[a]\n break\n return winner", "def utility(board) -> int:\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0", "def winner(board):\n # Check Rows\n for row in board:\n if row[0] != EMPTY and row[0] == row[1] and row[0] == row[2]:\n return row[0]\n \n # Check Columns\n for j in range(3):\n if board[0][j] != EMPTY and board[0][j] == 
board[1][j]:\n if board[0][j] == board[2][j]:\n return board[0][j]\n \n # Check Diagonals\n if board[1][1] != EMPTY:\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n return board[0][2]\n\n return None", "def winner():\n winning_lbl_zero.grid(row=0, column=LEFT_COL, rowspan=80, columnspan=2, sticky=N) # Placing the winning image\n messagebox.showinfo(title=\"**** WINNER! ****\", message=\"CONGRATS!!\\n\"\n \"You figured out the word/phrase\\n\"\n \"before it was too late, clearly your\\n\"\n \"guessing skills are unfathomable\")\n\n play_again() # Finds out if they'd like to play again", "def determine_winner(score1, score2):\n if score1 == score2:\n return 'tie'\n elif score1 == 21:\n return 'player1'\n elif score2 == 21:\n return 'player2'\n elif score1 > 21 or score2 > 21:\n if score1 > 21 and score2 > 21:\n if score1 - 21 < score2 - 21:\n return 'player1'\n else:\n return 'player2'\n elif score2 < 21 < score1:\n return 'player2'\n elif score1 < 21 < score2:\n return 'player1'\n elif score1 < 21 and score2 < 21:\n if score1 - score2 > 0:\n return 'player1'\n else:\n return 'player2'\n else:\n return None", "def player(board):\n\tif board == initial_state():\n\t\treturn X\n\n\tnumX=0\n\tnumO=0\n\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif(board[i][j]==X):\n\t\t\t\tnumX+=1\n\t\t\telif(board[i][j]==O):\n\t\t\t\tnumO+=1\n\n\tif numX > numO:\n\t\treturn O\n\telse:\n\t\treturn X", "def utility(board):\n winning_player = winner(board)\n\n if winning_player is X:\n return 1\n if winning_player is O:\n return -1\n \n return 0", "def winner(board):\n # Checking for 3 in a row\n for row in board:\n if row[0] is not EMPTY and row[0] == row[1] == row[2]:\n return row[0]\n\n # Checking for 3 in a col\n for col in range(len(board)):\n if board[0][col] is not EMPTY and board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n\n # Checking for Diagonals\n if board[0][0] is not EMPTY and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n \n if board[0][2] is not EMPTY and board[0][2] == board[2][0] == board[1][1]:\n return board[0][2]\n\n return None", "def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1" ]
[ "0.7595406", "0.7561998", "0.75591666", "0.7541609", "0.7517451", "0.75065154", "0.75041556", "0.74950504", "0.74286133", "0.7426717", "0.7375604", "0.7375604", "0.7337236", "0.72708327", "0.7246243", "0.7231121", "0.72244394", "0.7210622", "0.72083884", "0.71932673", "0.7186487", "0.71830726", "0.7165416", "0.7140795", "0.71366084", "0.7128342", "0.71223694", "0.71144223", "0.71133214", "0.71010613", "0.71010095", "0.7093958", "0.7078661", "0.7074339", "0.70640254", "0.7062051", "0.70594513", "0.7059197", "0.7051619", "0.70378536", "0.703047", "0.7007115", "0.70061415", "0.7000651", "0.69970244", "0.6976801", "0.6963794", "0.6961597", "0.69612056", "0.69529665", "0.69464856", "0.6946464", "0.6943516", "0.6935918", "0.6929775", "0.6926079", "0.69231164", "0.69217885", "0.6918278", "0.69168466", "0.6911553", "0.69101304", "0.6905625", "0.6900161", "0.6898115", "0.68980604", "0.6896556", "0.6883192", "0.687758", "0.68767905", "0.6875814", "0.6873682", "0.68722534", "0.6869388", "0.68552077", "0.6849741", "0.684823", "0.6848042", "0.6846789", "0.68442297", "0.68396163", "0.6838187", "0.6836474", "0.6832513", "0.6826142", "0.6824089", "0.6802225", "0.6797013", "0.67946416", "0.678918", "0.6780296", "0.67747885", "0.6765035", "0.6764446", "0.6757908", "0.6751163", "0.67462385", "0.6745997", "0.67439365", "0.67435837", "0.6739269" ]
0.0
-1
Extract ngram feature for single dataStr
def __processSingleInstance(self, dataStr):
        rsltList = [self.__depdData.GetDependName(d) \
                    for d in self.__depdData.SplitDenpStr(dataStr)]

        return rsltList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector", "def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)", "def ngram(self,phrase,n,unity=\"w\"):\n return self._support.ngram(phrase,n,unity)", "def get_ngrams(seq, n):\n return", "def propername_featurize(input_data,N, MinFreq,model_choice =\"NGram\"):\n def to_lowercase(text):\n return text.lower()\n\n def remove_URL(text):\n return re.sub(r\"http\\S+\", \"\", text)\n def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\n def tokenize(text):\n return text.split()\n def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stop_word:\n new_words.append(word)\n return new_words\n def detokenize_words(words):\n separator = ' '\n return separator.join(words)\n def preprocess_text(df):\n df['text'] = df['text'].apply(to_lowercase)\n df['text'] = df['text'].apply(remove_URL)\n df['text'] = df['text'].apply(tokenize)\n df['text'] = df['text'].apply(remove_non_ascii)\n df['text'] = df['text'].apply(detokenize_words) \n return df\n def character_ngram(text_matrix, N, MinFreq): #array of non-tokenized text\n #tokenize\n all_tokenized_text = []\n #build all token\n flatten_tokenized_text = []\n for j in text_matrix:\n cur_text = \"\".join(j.split())\n cur_feature = []\n \n for i in range(N[0]-1,N[1]): \n \n for l in range(len(cur_text) - i):\n cur_feature.append(cur_text[l:l+i+1])\n \n all_tokenized_text.append(cur_feature)\n flatten_tokenized_text.extend(cur_feature)\n charfreq = {}\n for i in flatten_tokenized_text:\n if i not in charfreq.keys():\n charfreq[i] = 1\n else:\n charfreq[i] += 1\n selected_feature = []\n for i, item in charfreq.items():\n if item >= MinFreq:\n selected_feature.append(i)\n dim = len(selected_feature)\n encoded_matrix = []\n selected_feature = np.array(selected_feature)\n for i in all_tokenized_text:\n cur_text = np.array(i)\n cur_encoded = np.zeros(dim)\n cur_idx = []\n for j in range(len(cur_text)):\n idx = np.where(selected_feature == cur_text[j]) \n if len(idx[0]) != 0: \n cur_idx.append(idx[0][0])\n #binary character presence \n cur_encoded[cur_idx] = 1\n\n encoded_matrix.append(cur_encoded)\n encoded_matrix = np.array(encoded_matrix)\n\n return encoded_matrix, selected_feature\n def task_specific_featurize(feature_value):\n feature_dic = {\"contain_numerics\":[], \"contain_special_punc\":[],\"contain_inc\":[],\"Small_token_length\":[]}\n special_pun = \"&\\?-:%\"\n company_col = [\"co.\",\"inc.\"]\n def hasNumbers(string):\n return any(char.isdigit() for char in string)\n for i in text_feature:\n if hasNumbers(i):\n feature_dic[\"contain_numerics\"].append(1)\n else:\n feature_dic[\"contain_numerics\"].append(0)\n Spec_Punc = False\n for l in special_pun:\n if i.find(l) != -1:\n feature_dic[\"contain_special_punc\"].append(1)\n Spec_Punc = True\n break\n if Spec_Punc == False:\n feature_dic[\"contain_special_punc\"].append(0)\n 
Contain_Com = False\n for l in company_col:\n if i.find(l) != -1:\n feature_dic[\"contain_inc\"].append(1)\n Contain_Com = True\n break\n if Contain_Com == False:\n feature_dic[\"contain_inc\"].append(0)\n token_length = len(i.split())\n if token_length <= 1:\n feature_dic[\"Small_token_length\"].append(1)\n else:\n feature_dic[\"Small_token_length\"].append(0)\n\n encoded_matrix = pd.DataFrame(feature_dic).values\n selected_feature = list(feature_dic.keys()) \n return encoded_matrix, selected_feature\n # TODO: Implement featurization of input.\n matrix_processed = preprocess_text(input_data)\n text_feature = matrix_processed[[\"text\"]].values.flatten() \n if model_choice == \"NGram\":\n \n encoded_matrix, selected_feature = character_ngram(text_feature, N, MinFreq)\n elif model_choice == \"TS\":\n encoded_matrix, selected_feature = task_specific_featurize(text_feature)\n elif model_choice == \"Combined\":\n\n encoded_matrix_specific, selected_feature_specific = task_specific_featurize(text_feature) \n encoded_matrix_bow, selected_feature_bow = character_ngram(text_feature, N, MinFreq)\n encoded_matrix = np.hstack((encoded_matrix_bow,encoded_matrix_specific))\n selected_feature = list(selected_feature_bow)\n selected_feature.extend(selected_feature_specific)\n \n return encoded_matrix,selected_feature", "def extract_feats(word, nlp):\n feat_dict = {}\n feat_string = ''\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc:\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n feat_dict[feat] = val\n feat_string += feat + ': ' + val + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def try_get_ngram(self, words, len_n, end_pos):\n if end_pos >= len_n - 1:\n ngram = u' '.join(words[end_pos - len_n + 1:end_pos + 1:])\n return ngram\n return None", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def ngram_encoding(self, data):\n _buffer = list()\n for word in data:\n if self.composition == \"bi-lstm\" or self.composition == \"addition\":\n ngrams = self.word_to_ngrams(word)\n _buffer.append(self.padding(ngrams, self.max_ngram_per_word,\n self.ngram_to_id[\"<PAD>\"]))\n else:\n sys.exit(\"Unknown composition\")\n return _buffer", "def add_ngram(self, 
feature_vector, ngram):\n raise NotImplementedError('NgramExtractorBase:add_ngram() is not defined')", "def n_gram(data, headers, cat_labels, num_labels):\n\t\tn_grammed = []\n\n\t\tfor i, tok in enumerate(data):\n\t\t\tif i == 0:\n\t\t\t\tn_grammed.append(data[-1]+tok+data[i+1])\n\t\t\telif i == len(data) - 1:\n\t\t\t\tn_grammed.append(data[i-1]+tok+data[0])\n\t\t\telse:\n\t\t\t\tn_grammed.append(data[i-1]+tok+data[i+1])\n\n\t\tn_grammed_headers = [header + \"_min1\" for header in headers] + headers + [header + \"_pls1\" for header in headers]\n\t\tn_grammed_cat_labels = [lab + \"_min1\" for lab in cat_labels] + cat_labels + [lab + \"_pls1\" for lab in cat_labels]\n\t\tn_grammed_num_labels = [lab + \"_min1\" for lab in num_labels] + num_labels + [lab + \"_pls1\" for lab in num_labels]\n\n\t\treturn n_grammed, n_grammed_headers, n_grammed_cat_labels, n_grammed_num_labels", "def ner(text = None, dist=False):\n r = []\n if text != None:\n r = requests.post(\"https://api.nb.no/ngram/ner\", json={'text':text,'dist':dist})\n return r.json()", "def extract_ngrams(self, sequence):\n sequence = self.prefix + sequence + self.suffix\n for i, event in enumerate(sequence[self.n:], self.n):\n yield event, sequence[i-self.n: i]", "def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]", "def get_ngram(n, sentence):\n if n == 1:\n return sentence\n \n # create phrases model to find words and ngrams that occur at least once\n ngram = Phraser(Phrases(sentence, min_count=1, threshold=1))\n\n # for bigrams and higher grams\n for i in range(3,n):\n ngram = Phraser(Phrases(ngram[sentence], min_count=1, threshold=1))\n return ngram[sentence]", "def word_ngrams(sent, n):\n\t\n\t# EXAMPLES \n\t# > word_ngrams(tokenize('hello world'), 1)\n\t# [('hello',), ('world',)]\n\t# > word_ngrams(tokenize('hello world'), 2)\n\t# [('<s>', 'hello'), ('hello', 'world'), ('world', '</s>')]\n\n\t# YOUR CODE HERE\n\ttokenized_sent = tokenize(sent)\n\tif n != 1:\n\t\ttokenized_sent.insert(0, '<s>')\n\t\ttokenized_sent.append('</s>')\n\treturn [tuple(tokenized_sent[i:i + n]) for i in range(0, len(tokenized_sent)-n+1)]", "def apply_ngram_filter(self, fn):\n self._apply_filter(lambda ng, f: fn(*ng))", "def get_ngram_features(data, subsequences, overlapping=False):\n features = pd.DataFrame(index=data.index)\n \n for subseq in subsequences:\n if overlapping:\n features[subseq] = data.sequence.apply(find_overlapping, args=(subseq, ))\n else:\n features[subseq] = data.sequence.str.count(subseq)\n \n \n return features", "def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams", "def ngrams(self):\n return self.__ngrams(())", "def ngrams(sequence, n):\n # credit: http://stackoverflow.com/questions/2380394/simple-implementation-of-n-gram-tf-idf-and-cosine-similarity-in-python\n sequence = list(sequence)\n count = max(0, len(sequence) - n + 1)\n return [tuple(sequence[i:i+n]) for i in range(count)]", "def sent2ngrams(text, n=3):\n if n == \"word\":\n return text.split()\n return list(chain(*[word2ngrams(i,n) for i in text.lower().split()]))", "def get_n_minus_1_grams(n_grams: str) -> str:\n return n_grams.rsplit(' ')[0]", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 
'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': 
postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)", "def turnmod(text,n,ngram_mod):\r\n data_gram=ngram_mod[0][text]\r\n for i in range(n-1):\r\n data_gram=ngram_mod[i+1][data_gram]\r\n return data_gram", "def classify(strings: List[str], params: Any) -> List[str]:\n \n # ############################ REPLACE THIS WITH YOUR CODE #############################\n def predict_one_sample(sample, train_dict, ngram_lvl=1):\n ngrams = [sample[i:i+ngram_lvl] for i in", "def word2ngrams(text, n=3):\n return [text[i:i+n] for i in range(len(text)-n+1)]", "def get_ngrams(stats,s,t,i):\n #lemma ngrams\n ngram_sizes = [\"bi\", \"tri\"]\n for ngram_size in ngram_sizes:\n lm_ngram = get_lemma_ngrams(s, t, i, ngram_size)\n if lm_ngram:\n put_feature_value_list(stats,\"lemma_\" + ngram_size + \"gr\", lm_ngram)\n\n #POS and deprel bigrams\n if i < s.length-1:\n put_feature_value_list(stats,\"deprels_bigr\", (t.deprel,s.nodes[i+1].deprel))\n put_feature_value_list(stats,\"pos_bigr\", (t.pos,s.nodes[i+1].pos))\n \n #POS and deprel trigrams\n if i < s.length-2:\n put_feature_value_list(stats,\"deprels_trigr\", (t.deprel, s.nodes[i+1].deprel, s.nodes[i+2].deprel))\n put_feature_value_list(stats,\"pos_trigr\", (t.pos, s.nodes[i+1].pos, s.nodes[i+2].pos))\n\n return stats", "def extract_nps(text, annotation):\n np_starts = [i for i in range(len(annotation)) if annotation[i] == 'B-NP']\n np_indexes = []\n for s in np_starts:\n i = 1\n while s+i < len(annotation) and annotation[s + i] == 'I-NP':\n i += 1\n np_indexes.append((s, s + i))\n return [' '.join(text[s:e]) for s, e in np_indexes]", "def get_dataset_features(text):\n return model.extract(text)", "def add_ngram(self, feature_vector, ngram):\n if ngram in self.ngrams:\n ngram_pos = self.ngrams[ngram]\n feature_vector[ngram_pos] = 1", "def __getitem__(self, ngram):\n return self._ngrams.get(ngram, 0.0)", "def __init__(self):\n self._ngrams = {}", "def ngrams(words, n=1):\n return [tuple(words[j:j + n]) for j in range(len(words) - n + 1)]", "def _generateNgrams(self,text,n=2):\n token = Utilities.CVTokeniser(text)\n # token = nltk.word_tokenize(text)\n computedNgrams=ngrams(token,n)\n return Counter(computedNgrams)", "def eval_ngram(self, ngram):\n # Get raw value-\n nom = self.freq_dist[ngram]\n if self.N == 1:\n nom = self.uni_dist[ngram]\n denom = sum(self.uni_dist.values())\n else:\n denom = self.prev_dist[ngram[:-1]]\n\n if self.smoothing == \"laplace\":\n nom += 1\n denom += len(self.freq_dist)\n\n if self.smoothing == \"interpolation\":\n\n p_at_n = [0 for x in range(self.N)]\n\n for n_len in range(1, self.N + 1):\n\n nom_ngram = tuple(ngram[len(ngram) - n_len:])\n denom_ngram = nom_ngram[:-1]\n\n\n nom_freq = self.freqs[n_len]\n denom_freq = self.freqs[n_len - 1 ]\n if n_len == 1:\n nom_freq = self.uni_dist\n denom_freq = self.uni_dist\n\n nom_count = nom_freq[nom_ngram]\n denom_count = denom_freq[denom_ngram]\n if n_len == 1:\n denom_count = sum(nom_freq.values()) \n \n p_at_n[n_len - 1] = 0\n if denom_count:\n p_at_n[n_len - 1] = nom_count / denom_count \n\n output_p = 0\n for index, item in enumerate(p_at_n):\n output_p += item * self.interp_coeff[index]\n return 
math.log2(output_p)\n\n return math.log2(nom/denom)", "def find_all_ngrams(dataset, n):\n return zip(*[dataset[i:] for i in xrange(n)])", "def test_no_ngrams():\n tokenizer = Tokenizer(quadgram_freq=2)\n X = tokenizer.transform([[\"a b c d\"]])\n assert X[\"corpus\"][0] == [\"a\", \"b\", \"c\", \"d\"]\n assert tokenizer.quadgrams is None", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n\n ### YOUR CODE HERE\n def enterDic(phrase, dict):\n if phrase in dict:\n dict[phrase] += 1\n else:\n dict[phrase] = 1\n\n unigram_counts[word_to_num['UUUNKKK']] = 0\n\n for sentence in dataset:\n enterDic(sentence[1], unigram_counts) # count number of start of sentences\n enterDic((sentence[0], sentence[1]), bigram_counts) # count number of start of sentences\n token_count += 2\n for i in range(2, len(sentence)):\n token_count += 1\n enterDic(sentence[i], unigram_counts)\n enterDic((sentence[i - 1], sentence[i]), bigram_counts)\n enterDic((sentence[i - 2], sentence[i - 1], sentence[i]), trigram_counts)\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def bigram_representation(data):\r\n vec = CountVectorizer(ngram_range=(1,2))\r\n vec = vec.fit(data)\r\n return vec", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. 
question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def make_returnn_audio_features_func():\n return _extract", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def ngrams(text, n):\n grams = zip(*[text[i:] for i in range(n)])\n return [''.join(gram) for gram in grams]", "def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]", "def ngrams(self, n=1, uncased=False, filter_fn=None, 
as_strings=True):\n\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(True)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams", "def get_data_from_nonformat_text():\n pass", "def build_model(self, text):\n text = '< ' * (self.n - 1) + text.replace(' . ', ' .%s ' % (' <' * (self.n - 1))) + ' >'\n tokens = self.split(text)\n self.corpus_len = len(tokens)\n self.n_grams_by_len = [{} for _ in range(self.n)]\n for i in range(len(tokens)): # for index in tokens\n for n in range(self.n): # for n-gram size from 1 to n:\n if i >= n: # if the index has advanced enough for this n\n n_gram = self.join(tokens[i - n: i + 1])\n n_grams = self.n_grams_by_len[n] # get dict for respective n\n n_grams[n_gram] = n_grams.get(n_gram, 0) + 1 # get dict for respective n\n return self.get_model()", "def ngrams(tokens,lang):\n stopwords = stops.stopwords[lang]\n max = len(tokens)\n ngrams = []\n left_punctuation = '!\"%&\\'()*+,-./:;<=>?[\\\\]^_`{|}~'\n\n for i in range(1,max):\n for j in xrange(0,len(tokens)-(i-1)):\n if __check_features(tokens[j:j+i],stopwords):\n ng_str = \" \".join(tokens[j:j+i])\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) \n ngrams.append(ng_str)\n \n ng_str = \" \".join(tokens)\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) \n ngrams.append(ng_str)\n return ngrams", "def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(uncased)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams", "def build_ngram_vocab(self, n):\n max_ngram_per_word = 0\n ngram_dict = collections.defaultdict(int)\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n _word = '^' + word + '$'\n ngram_counts = len(_word) - n + 1\n if ngram_counts > max_ngram_per_word:\n max_ngram_per_word = ngram_counts\n for i in range(ngram_counts):\n ngram = _word[i:i + n]\n ngram_dict[ngram] += 1\n\n unk_ngram_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(ngram_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_ngram_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_ngram_list, max_ngram_per_word", "def ngrams_(text, n):\n return zip(*[text[i:] for i in range(n)])", "def compute_ngrams(str, n=2):\n #split the string\n st = tuple(str.split())\n #not list of list but list of tuple..\n gram = [st[x:x+n] for x in range(len(st)-n+1)]\n dgram = {}\n #convert ngram into dictionary:\n for key in gram:\n dgram.setdefault(key[0],[]).append(key[1:])\n return dgram", "def unigram_representation(data):\r\n vec = CountVectorizer()\r\n vec = vec.fit(data)\r\n return vec", "def reconstruct_ngram(self, ngram):\n\n punc_b = ['!', '?', '.', ',', ';', ':', '\\'', ')', ']', '}']\n punc_a = ['(', '[', '}', '$']\n ngram = ' '.join(ngram)\n for p in punc_b:\n ngram = 
ngram.replace(' '+p, p)\n for p in punc_a:\n ngram = ngram.replace(p+' ', p)\n ngram = re.sub('(^| )BEGQ', ' \"', ngram)\n ngram = re.sub('ENDQ($| )', '\" ', ngram)\n ngram = ngram.replace('DOUBLEDASH', '--')\n return ngram", "def count_n_grams(data, n, start_token='<s>', end_token = '<e>'):\r\n \r\n # Initialize dictionary of n-grams and their counts\r\n n_grams = {}\r\n\r\n \r\n for sentence in data: # complete this line\r\n \r\n # prepend start token n times, and append <e> one time\r\n sentence = [start_token]*n + sentence + [end_token]\r\n \r\n # convert list to tuple\r\n # So that the sequence of words can be used as\r\n # a key in the dictionary\r\n sentence = tuple(sentence)\r\n\r\n \r\n for i in range(len(sentence)+1-n): # complete this line\r\n\r\n # Get the n-gram from i to i+n\r\n n_gram = sentence[i:i+n]\r\n\r\n # check if the n-gram is in the dictionary\r\n if n_gram in n_grams: \r\n \r\n # Increment the count for this n-gram\r\n n_grams[n_gram] += 1\r\n else:\r\n # Initialize this n-gram count to 1\r\n n_grams[n_gram] = 1\r\n \r\n return n_grams", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def word_to_ngrams(self, word):\n encoding = list()\n n = self.n\n if word == self.eos or word == self.sos:\n encoding.append(self.ngram_to_id[word])\n else:\n _word = '^' + word + '$'\n for i in range(len(_word) - n + 1):\n ngram = _word[i:i + n]\n if ngram in self.ngram_to_id:\n encoding.append(self.ngram_to_id[ngram])\n else:\n for ch in ngram:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.ngram_to_id and flag == 1:\n encoding.append(self.ngram_to_id[ch])\n else:\n encoding.append(self.ngram_to_id['<unk>'])\n return encoding", "def fd(self, sent, index, length):\n context = lambda idx, field: sent[index + idx][field] \\\n if index+idx >= 0 and index + idx < length \\\n else \"<s>\" if index+idx < 0 \\\n else \"</s>\"\n\n ## tokens in a 5 token window x_{i-2}..x_{i+2}\n word_unigram_cur = numify(context(0, WORD))\n word_unigram_pre = numify(context(-1, WORD))\n word_unigram_2pre = numify(context(-2, WORD))\n word_unigram_post = numify(context(1, WORD))\n word_unigram_2post = numify(context(2, WORD))\n\n ## token bigrams in a 5 token window\n word_bigram_pre_cur = \"/\".join([word_unigram_pre, word_unigram_cur])\n word_bigram_cur_post = \"/\".join([word_unigram_cur, word_unigram_post])\n\n ## pos in a 5 token window\n pos_cur = context(0, POS)\n pos_pre = context(-1, POS)\n pos_post = context(1, POS)\n pos_2pre = context(-2, POS)\n pos_2post = context(2, POS)\n\n ## pos bigrams in a 3 token window\n pos_bigram_pre_cur = \"/\".join([pos_pre, pos_cur])\n pos_bigram_cur_post = \"/\".join([pos_cur, pos_post])\n #pre_pre_pos_bigram = \"/\".join([pre_pre_pos, pre_pos])\n #post_post_pos_bigram = \"/\".join([post_pos, post_post_pos])\n\n pos_posw_cur = \"/\".join([word_unigram_cur, pos_cur])\n\n ## Word shape features (5 token window)\n shape_istitle_cur = word_unigram_cur.istitle()\n shape_isdigit_cur = context(0, WORD).isdigit()\n shape_isupper_cur = word_unigram_cur.isupper()\n shape_hyphen_cur = \"-\" in word_unigram_cur[1:-1]\n shape_isalnum_cur = context(0, WORD).isalnum()\n #shape_mixedcase_cur = self.mixedcase.match(context(0, WORD)) != None\n\n shape_istitle_pre = word_unigram_pre.istitle()\n shape_isdigit_pre = context(-1, WORD).isdigit()\n shape_isupper_pre = word_unigram_pre.isupper()\n shape_hyphen_pre = \"-\" in 
word_unigram_pre[1:-1]\n shape_isalnum_pre = context(-1, WORD).isalnum()\n #shape_mixedcase_pre = self.mixedcase.match(context(-1, WORD)) != None\n\n shape_istitle_2pre = word_unigram_2pre.istitle()\n shape_isdigit_2pre = context(-2, WORD).isdigit()\n shape_isupper_2pre = word_unigram_2pre.isupper()\n shape_hyphen_2pre = \"-\" in word_unigram_2pre[1:-1]\n shape_isalnum_2pre = context(-2, WORD).isalnum()\n #shape_mixedcase_2pre = self.mixedcase.match(context(-2, WORD)) != None\n\n shape_istitle_post = word_unigram_post.istitle()\n shape_isdigit_post = context(1, WORD).isdigit()\n shape_isupper_post = word_unigram_post.isupper()\n shape_hypen_post = \"-\" in word_unigram_post[1:-1]\n shape_isalnum_post = context(1, WORD).isalnum()\n #shape_mixedcase_post = self.mixedcase.match(context(1, WORD)) != None\n\n shape_istitle_2post = word_unigram_2post.istitle()\n shape_isdigit_2post = context(2, WORD).isdigit()\n shape_isupper_2post = word_unigram_2post.isupper()\n shape_hypen_2post = \"-\" in word_unigram_2post[1:-1]\n shape_isalnum_2post = context(2, WORD).isalnum()\n #shape_mixedcase_2post = self.mixedcase.match(context(2, WORD)) != None\n\n ## 2-4 suffixes in a 3 token window\n suffix_1_cur = word_unigram_cur[-1:]\n suffix_2_cur = word_unigram_cur[-2:]\n suffix_3_cur = word_unigram_cur[-3:]\n suffix_4_cur = word_unigram_cur[-4:]\n\n suffix_1_pre = word_unigram_pre[-1:]\n suffix_2_pre = word_unigram_pre[-2:]\n suffix_3_pre = word_unigram_pre[-3:]\n suffix_4_pre = word_unigram_pre[-4:]\n\n suffix_1_post = word_unigram_post[-1:]\n suffix_2_post = word_unigram_post[-2:]\n suffix_3_post = word_unigram_post[-3:]\n suffix_4_post = word_unigram_post[-4:]\n\n ## 3-4 prefixes in a 3 token window\n prefix_3_cur = word_unigram_cur[:3]\n prefix_4_cur = word_unigram_cur[:4]\n\n prefix_3_pre = word_unigram_pre[:3]\n prefix_4_pre = word_unigram_pre[:4]\n\n prefix_3_post = word_unigram_post[:3]\n prefix_4_post = word_unigram_post[:4]\n\n ## Noun phrase in a 3 token window\n syn_np_cur = context(0, NP)\n syn_npw_cur = \"/\".join([syn_np_cur, word_unigram_cur])\n syn_np_pre = context(-1, NP)\n syn_np_post = context(1, NP)\n\n ## Extract features from local scope\n features = locals()\n del features[\"context\"]\n del features[\"sent\"]\n del features[\"index\"]\n del features[\"length\"]\n del features[\"self\"]\n features = features.items()\n\n features.extend(self.brown_extractor(\"brown_%d_cur\", context(0, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_pre\", context(-1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2pre\", context(-2, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_post\", context(1, WORD)))\n features.extend(self.brown_extractor(\"brown_%d_2post\", context(2, WORD))) \n\n return features", "def ngrams(word, n):\n word = list(word)\n # insert extra tokens\n word.insert(0, '$')\n word.append('$')\n\n output = []\n for i in range(len(word) - n + 1):\n # print(i)\n # print(word[i:i + n])\n output.append(''.join(word[i:i + n]))\n return output", "def filter_ngram(gram, mode='any'):\n filtered = [filter_word(w) for w in gram]\n if mode == 'any':\n return any(filtered)\n elif mode == 'all':\n return all(filtered)\n elif mode == 'ends':\n return filtered[0] or filtered[-1]\n else:\n raise ValueError('Invalid mode: %s' % mode)", "def ngrams(word, size):\n expanded = \"^\" + word + \"$\"\n for start in range(len(expanded) - size + 1):\n yield expanded[start:start + size]", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two 
files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"", "def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap", "def pgram(w, freqs, N):\n mle = 0\n fifo = [':'] * N\n for i in range(N-1):\n \tw += ':'\n for c in w:\n fifo.pop(0)\n fifo.append(c)\n n = N\n ngram = ''.join(fifo[:n])\n p = log(MLE(ngram, freqs))\n mle += p\n return mle", "def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def iter_ngrams(self, sentence, n):\n return [tuple(sentence[i : i+n]) for i in range(len(sentence)-n+1)]", "def ngrams(text, n):\n return chain(*[ngrams_(text, i) for i in range(n + 1)])", "def __init__(self, ns):\n\n ns.sort()\n self.ns = ns\n self.ngrams = {}\n self.feature_list = []\n self.name = 'NgramExtractorBase'", "def text_feature_extract(df):\n return df", "def test_regex_featurizer():\n from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer\n sentence, expected, labeled_tokens = (\n \"hey how are you today\",\n [\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n ],\n [0],\n )\n patterns = [\n {\"pattern\": \"[0-9]+\", \"name\": \"number\", \"usage\": \"intent\"},\n {\"pattern\": \"\\\\bhey*\", \"name\": \"hello\", \"usage\": \"intent\"},\n {\"pattern\": \"[0-1]+\", \"name\": \"binary\", \"usage\": \"intent\"},\n ]\n ftr = RegexFeaturizer({}, known_patterns=patterns)\n\n # adds tokens to the message\n tokenizer = SpacyTokenizer({})\n message = Message(sentence, data={RESPONSE: sentence})\n assert 
show_message(message, False) == {\n \"response\": \"hey how are you today\",\n \"text\": \"hey how are you today\"\n }\n message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))\n tokenizer.process(message)\n # assert show_message(message) == {'response': 'hey how are you today', 'text_spacy_doc': spacy_nlp(\"hey how are you today\"),\n # 'tokens': ['hey', 'how', 'are', 'you', 'today', '__CLS__'],\n # 'text': 'hey how are you today'}\n # result = ftr._features_for_patterns(message, TEXT)\n ftr.process(message) # [TEXT, RESPONSE]\n show_message(message)\n assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0", "def ngrams(self):\n return self.root.ngrams()", "def extract_features(docs_train, docs_test, perform_dimensionality_reduction):\n word_ngram_range = (1, 4)\n char_ngram_range = (2, 5)\n\n '''\n Build an n grams vectorizer with word_n_gram_range and char_n_gram_range\n '''\n\n ngrams_vectorizer = create_n_grams_vectorizer(\n word_ngram_range, char_ngram_range)\n\n # use the n_gram vectorizer to form the train and test dataset\n # it will take a lot of time... i think\n X_train = ngrams_vectorizer.fit_transform(docs_train)\n X_test = ngrams_vectorizer.transform(docs_test)\n print(\"Performed fitting of data\")\n\n ############ dimensionality reduction ################\n\n if(perform_dimensionality_reduction == True):\n X_train, X_test = perform_dimensionality_reduction(X_train, X_test)\n\n # print(docs_train[0])\n return X_train, X_test", "def get_ngrams(self, n = None):\n if not n:\n n = self.N\n\n temp = []\n for sent in self.get_sentences():\n temp.extend(list(sent))\n\n return nltk.ngrams(temp, n)", "def get_ngrams(sequence, n):\n length = len(sequence)\n #if only require 1-gram, then we need to add one START and one END to the sequence. \n if n==1 or n==2:\n sequence=[\"START\"]*n+sequence+[\"STOP\"]\n end = n+1 #end i means that when n==1, we need to read one more data, that is to the end of sequence, which is slightly different from when n>1.\n #if require multi-grams, use the common calculation below.\n else:\n sequence = [\"START\"]*(n-1)+sequence+[\"STOP\"]\n end = 1\n if n==2:\n end = n\n result = []\n temp = ()\n #the process to construct the tuple-based array.\n for i in range(0,length+end):\n temp = tuple(sequence[i:i+n])\n\n result.append(temp)\n return result", "def ingest_ngram(self, ngram, doc, n):\n # construct the storable ngram object\n #self.ngrams[n].append({\n # 'date': doc['date'], # store this so it can be pruned when old\n # 'ngram': ngram,\n # 'subject': doc['subject'] if 'subject' in doc else None\n #})\n # initialised hash element\n if not ngram in self.ngram_history:\n self.ngram_history[ngram] = { 'occurances': [] }\n\n self.ngram_history[ngram]['occurances'].append({ 'date': doc['date'], 'doc_id': doc['id'] })\n\n # @todo: add this to a queue to look for trends...this.isNGramTrending(ngram, doc);", "def get_ngrams(self, s, ngmin, ngmax, separator=\"\",\n bos=\"<\", eos=\">\", suffix=\"\", flatten=True):\n\n # return a single dummy feature if there are no applicable ngrams\n # probably resulting in a mojority-class classifier\n if ngmax == 0 or (ngmax - ngmin < 0) :\n return ['__dummy__']\n\n ngrams = [[] for x in range(1, ngmax + 1)]\n s = [bos] + s + [eos]\n for i, ch in enumerate(s):\n for ngsize in range(ngmin, ngmax + 1):\n if (i + ngsize) <= len(s):\n ngrams[ngsize - 1].append(\n separator.join(s[i:i+ngsize]) + suffix)\n if flatten:\n ngrams = [ng for nglist in ngrams for ng in nglist]\n return ngrams", "def transform_ngrams(self, words):\n return words if 
self.n_grams == 1 else [self.connector.join(words[i:i + self.n_grams]) for i in range(len(words) - self.n_grams + 1)]", "def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:\n data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]\n return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))", "def preprocess_char_ngram(self):\n self.char_to_id, self.unk_char_list = self.build_vocab(mode=\"char\")\n self.ngram_to_id, self.unk_ngram_list, self.max_ngram_per_word = self.build_ngram_vocab(self.n)\n for ch in self.char_to_id:\n if ch not in self.ngram_to_id:\n self.ngram_to_id[ch] = len(self.ngram_to_id)\n self.subword_vocab_size = len(self.ngram_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, self.max_ngram_per_word), f)", "def clean_substr(self, match_obj):\n x = MLStripper()\n x.feed(match_obj.group(1).strip())\n return x.get_fed_data()", "def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], 
ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)", "def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return", "def get_preds_ngram(preds, len_preds, n):\n from utils.dataProcess import get_N_gram\n\n def iter_preds(preds, len_preds):\n for len, utt in zip(len_preds, preds):\n for token in utt[:len]:\n yield token.numpy()\n ngrams = get_N_gram(iter_preds(preds, len_preds), n)\n\n return ngrams", "def learn_from_one(self, words):\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram not in self.ngrams:\n self.ngrams[ngram] = len(self.feature_list)\n self.feature_list.append(ngram)", "def ngrams(doc, n, filter_stopwords=True, good_tags={'NOUN', 'PROPN', 'ADJ'}):\n ngrams_ = (doc[i:i + n] for i in range(len(doc) - n + 1))\n ngrams_ = (ngram for ngram in ngrams_\n if not any(w.is_space or w.is_punct for w in ngram))\n\n if filter_stopwords:\n ngrams_ = (ngram for ngram in ngrams_\n if not any(w.is_stop for w in ngram))\n if good_tags:\n ngrams_ = (ngram for ngram in ngrams_\n if all(w.pos_ in good_tags for w in ngram))\n\n for ngram in ngrams_:\n yield ngram", "def dissect(self, text):", "def get(self, n):\n parts = [' '.join(g) for g in ngrams(self.words, n)]\n return get_occurences(parts)", "def make_tweet_nparr( txt ):\n # result storage\n fvec = numpy.empty( len(testFeatures) )\n\n # search for each feature\n txtLow = ' ' + txt.lower() + ' '\n for i in range( 0, len(testFeatures) ):\n\n key = testFeatures[i][0]\n\n fvec[i] = False\n for tstr in testFeatures[i][1]:\n fvec[i] = fvec[i] or (txtLow.find(tstr) != -1)\n\n return fvec", "def n_grama(sentence, n):\n t = tokenize(sentence)\n n_grams = zip(*[t[i:] for i in range(n)])\n return list(map(lambda n_gram: ' '.join(n_gram), n_grams))", "def ngramify(corpus, n):\r\n unlist = 0\r\n if type(corpus[0]) is not list:\r\n corpus = [corpus]\r\n unlist = 1\r\n new_corpus = []\r\n for line in corpus:\r\n new_line = []\r\n for gram in range(len(line) - n + 1):\r\n new_gram = \"\"\r\n for i in range(n):\r\n if i != 0:\r\n new_gram += \" \"\r\n new_gram += line[gram + i]\r\n new_line.append(new_gram)\r\n new_corpus.append(new_line)\r\n if unlist:\r\n return new_corpus[0]\r\n return new_corpus", "def morph_features(prev_word, pos, nlp, lm):\n feat_string = ''\n feat_dict = {}\n # Collect 15 words which are most likely to follow the previous word w_(i-1)\n # according to the LM and preserve only the ones that belong to the same\n # part of speech as the current word w_i\n for word, _ in lm.counts.__getitem__([prev_word]).most_common(20):\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc and (doc['upos'] == pos or doc['xpos'].split('|')[0] == pos):\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n if feat in feat_dict:\n feat_dict[feat].append(val)\n else:\n feat_dict[feat] = [val]\n #print(feat_dict)\n # Find the most common value for each feature\n for dict_key in feat_dict.keys():\n feat_string += dict_key + ': ' + 
max(set(feat_dict[dict_key]), \n key = feat_dict[dict_key].count) + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def get_grams(candidate, n):\n words = candidate.split(' ')\n # print(words)\n grams = list()\n for i in range(len(words) - n + 1):\n # print(words[i:i+n])\n grams.append(' '.join(words[i:i+n]))\n return grams", "def generate_ngram(corpus,n=2):\r\n def generate_ngram_str(text,n):\r\n text = tokenizer.tokenize(text)\r\n for i in range(0, len(text)-n+1):\r\n yield text[i:i+n]\r\n if isinstance(corpus,str):\r\n for ngram in generate_ngram_str(corpus,n):\r\n yield ngram\r\n elif isinstance(corpus, (list, types.GeneratorType)):\r\n for text in corpus:\r\n for ngram in generate_ngram_str(text,n):\r\n yield ngram", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id 
occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams", "def fetch_multimnist_text(label):\n text = char_tensor(label).unsqueeze(0)\n return Variable(text, volatile=True)", "def test_ngram():\n # Some examples of functions usage\n trigram_counts, bigram_counts, unigram_counts, token_count = train_ngrams(S_train)\n print \"#trigrams: \" + str(len(trigram_counts))\n print \"#bigrams: \" + str(len(bigram_counts))\n print \"#unigrams: \" + str(len(unigram_counts))\n print \"#tokens: \" + str(token_count)\n perplexity = evaluate_ngrams(S_dev, trigram_counts, bigram_counts, unigram_counts, token_count, 0.5, 0.4)\n print \"#perplexity: \" + str(perplexity)\n ### YOUR CODE HERE\n print(vocabsize)\n ### END YOUR CODE" ]
[ "0.6531941", "0.6494894", "0.63788724", "0.61673313", "0.6166521", "0.6000876", "0.59034157", "0.5884168", "0.58563024", "0.57961416", "0.57676554", "0.57167673", "0.5701004", "0.5696321", "0.56779176", "0.56565773", "0.5656345", "0.5622813", "0.5619041", "0.5607017", "0.558414", "0.5573004", "0.5559363", "0.5554977", "0.55391914", "0.55348444", "0.5531596", "0.5530072", "0.5526714", "0.5521326", "0.55153596", "0.55074364", "0.5498495", "0.5487681", "0.54844856", "0.54725945", "0.54697007", "0.5464036", "0.5458944", "0.54479134", "0.5443375", "0.54338086", "0.54266626", "0.5425011", "0.54220647", "0.5414336", "0.54032433", "0.54021555", "0.5399689", "0.53740513", "0.5372789", "0.5371466", "0.53679395", "0.5363972", "0.53518474", "0.53476083", "0.5343369", "0.53223795", "0.53157645", "0.531297", "0.53074133", "0.5306749", "0.52970314", "0.52896243", "0.52891964", "0.52840877", "0.527388", "0.52552795", "0.525461", "0.52530926", "0.5248467", "0.5247228", "0.5237625", "0.52360016", "0.52327675", "0.5223194", "0.52215314", "0.5214706", "0.52038854", "0.52030134", "0.5200678", "0.5198115", "0.519183", "0.5187046", "0.5170181", "0.5164818", "0.51631665", "0.51551974", "0.5150548", "0.51432604", "0.5140815", "0.5140495", "0.5135932", "0.51336956", "0.5129973", "0.5128804", "0.5128536", "0.51199126", "0.51190704", "0.5107215", "0.5102968" ]
0.0
-1
Extract POS ngram feature from tree dict
def ExtractFeatureOnCorpus(self, dataDict):
    wd = WritingData.GetInstance()

    # Step 1. First pass scan to get sorted vocab
    vocab = {}  # POS vocab
    classSet = set()
    classColStr = "Nationality"
    numWrt = 0
    for wrtId in dataDict.keys():  # wrtId (int)
        classVal = wd.GetValueByWid(wrtId, classColStr)  # classId, i.e. nationality
        classVal = classVal.lower()
        classSet.add(classVal)
        numWrt += 1
        if numWrt % 500 == 0:
            print("Processed num writing = %d" % numWrt)
        dataList = dataDict[wrtId]
        for dataStr in dataList:
            fes = self.__processSingleInstance(dataStr)  # features from this tree
            for fe in fes:
                if vocab.has_key(fe):
                    vocab[fe] += 1
                else:
                    vocab[fe] = 1
    lg = Log()
    msg = "[DepdExtractor] " + "First pass vocab scan, #vocab = %d" % len(vocab)
    lg.PrintWriteLog(msg)

    # Step 2. Sort vocab, and doing min frequency cut-off
    if self.__minFreq > 1:
        for key in vocab.keys():
            freq = vocab[key]
            if freq < self.__minFreq:
                del vocab[key]
        msg = "[DepdExtractor] " + \
            "Applied minimum frequency cut-off (#attr) #vocab = %d" % len(vocab)
        print(msg)
        lg.WriteLog(msg)
    import operator
    sortedVocab = sorted(vocab.iteritems(), key = operator.itemgetter(1), \
        reverse = True)
    attrIdx = {}  # Index for attributes, for later looking-up
    idx = 0
    for (word, freq) in sortedVocab:
        attrIdx[word] = idx
        idx += 1

    # Step 3. Second pass, extracting feature
    attrIdxOffset = 1  # Because the first attribute is "CLASS_LABEL", \
                       # so we need to add this offset
    nSen = 0
    featureList = []
    for wrtId in dataDict.keys():  # wrtId (int)
        classVal = wd.GetValueByWid(wrtId, classColStr)  # classId, nationality
        if classVal is None:
            continue
        dataList = dataDict[wrtId]
        for dataStr in dataList:  # process each sentence
            attrList = []
            attrList.append('%d %s' % (0, classVal))  # Add class id first
            fes = self.__processSingleInstance(dataStr)  # features from this tree
            # TODO This is numeric version!!
            feDict = {}
            for fe in fes:
                if not attrIdx.has_key(fe):
                    continue
                idx = attrIdx[fe] + attrIdxOffset
                if feDict.has_key(idx):
                    feDict[idx] += 1.0
                else:
                    feDict[idx] = 1.0
            if len(feDict) <= 0:  # TODO maybe reserve empty instace for combination
                print("Extract 0 features for data: " + dataStr)
                featureList.append(attrList)  # Only has one attr, i.e. the class id
                continue
            # Sort the attribute list, as required for Sparse Data format in Weka
            sortedFeDict = sorted(feDict.iteritems(), key = operator.itemgetter(0))
            # Write up atrribute list
            for (idx,freq) in sortedFeDict:
                attrList.append('%d %.1f' % (idx, freq))  # 1, means 'norminal'
            # Final step, adding to feautreList to return
            featureList.append(attrList)

    return (classSet, sortedVocab, featureList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_tree(tree):\n return find_noun(tree)\n\n # noun_phrase = re.match(\"NP|WHNP\", tree.parent().label())\n # noun = re.match(\"NN.*\", tree.label())\n # return noun_phrase and noun", "def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict", "def parse_tree_features(df):\n \n nlp = spacy.load(SPACY_MODEL, disable=['ner'])\n nlp.add_pipe(BeneparComponent(\"benepar_en_small\"))\n \n # parse text\n df['B_Tokens'] = df['Text'].apply(lambda x: nlp(x))\n \n # get features\n df['NP_per_sent'], df['VP_per_sent'], df['PP_per_sent'], \\\n df['SBAR_per_sent'], df['SBARQ_per_sent'], df['avg_NP_size'], \\\n df['avg_VP_size'], df['avg_PP_size'], df['avg_parse_tree'] = zip(*df['B_Tokens'].map(_get_parse_tree_features))\n \n # remove B_Tokens\n df.drop(columns=[\"B_Tokens\"], inplace=True)\n \n return df", "def filter_tree(tree):\n return find_pos(tree)\n\n # if tree.parent() is None:\n # return False\n\n # noun_phrase = re.match(\"NP|VP\", tree.parent().label())\n # pos = re.match(\"NN.*|JJ|CD|VBN\", tree.label())\n # return noun_phrase and pos", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector", "def extract_feats(word, nlp):\n feat_dict = {}\n feat_string = ''\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc:\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n feat_dict[feat] = val\n feat_string += feat + ': ' + val + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def morph_features(prev_word, pos, nlp, lm):\n feat_string = ''\n feat_dict = {}\n # Collect 15 words which are most likely to follow the previous word w_(i-1)\n # according to the LM and preserve only the ones that belong to the same\n # part of speech as the current word w_i\n for word, _ in lm.counts.__getitem__([prev_word]).most_common(20):\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc and (doc['upos'] == pos or doc['xpos'].split('|')[0] == pos):\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n if feat in feat_dict:\n feat_dict[feat].append(val)\n else:\n feat_dict[feat] = [val]\n #print(feat_dict)\n # Find the most common value for each feature\n for dict_key in feat_dict.keys():\n feat_string += dict_key + ': ' + max(set(feat_dict[dict_key]), \n key = feat_dict[dict_key].count) + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string", "def pos_treebank(data_word):\n #returns dict\n w_pos_treebank = nltk.pos_tag(data_word)\n w_pos_treebank = dict(w_pos_treebank)\n return w_pos_treebank", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = 
pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def read_feature_dict(path):\n feature_dict = []\n with open(path, 'r', encoding='utf-8') as dictfile:\n for line in dictfile:\n if line.lstrip(' \\t').startswith('#'):\n # This line is a comment line, ignore it\n continue\n else:\n # This line contains one or more tokens, split them up and wrap them in the format for VisaS POS files.\n tokens = line.rstrip(' \\t\\n').rstrip(' \\t').split()\n dict_tokens = \"\"\n for token in tokens:\n quantifier = \"\"\n if re.match(\"\\(.+\\)([?*+])\",token):\n quantifier = re.match(\"\\(.+\\)([?*+])\",token).group(1)\n token = token.lstrip('(').rstrip(')?*+')\n if '_' in token:\n if token.startswith('_'):\n # token starts with '_' and is a POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" \"\n else:\n try:\n # token is a lemma with POS tag attached, split the lemma and pos tag\n pos_token = token.split('_')\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" \"\n\n except IndexError:\n print(\"Warning! Invalid token found in line '\" + line + \"'\")\n elif token == '...':\n # ... is converted to one or more arbitrary tokens\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+_\\S+? )+\"\n else:\n # token is a lemma without POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? \"\n if dict_tokens:\n feature_dict.append(dict_tokens)\n if len(feature_dict) is 0:\n print(\"Warning! 
No valid entries found in dictionary \" + path)\n return None\n else:\n return feature_dict", "def pos_features(compactcorpus):\n start=time()\n \n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,25,False),8)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,25,False),8)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,25,False),8)\n #tag_bigrams =common_but_unique(ngrams_dict(2,authors,compact_to_tag(compactcorpus),20,False),15) #PAS OP Duurt erg lang om te gebruiken (dus ook nog niet getest...ivm tijd)\n skipgrams = common_but_unique(skipgrams_dict(authors,compactcorpus,10),10)\n\n minimal_wrdoccurence = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n minimal_trigram_occurence = [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n minimal_bigram_occurence = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)]\n #minimal_skipgram_occurence = [\"skip:(\"+str(skip[0])+\",\"+str(skip[1])+\",\"+str(skip[2])+\")>\"+str(num) for skip in skipgrams for num in range(0,1)]\n\n features = minimal_bigram_occurence + minimal_wrdoccurence + minimal_trigram_occurence #+ minimal_skipgram_occurence\n print \"pos feat in:\"+str(time()-start)\n return features", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = 
str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features", "def get_trees(self, word): # -> list:\r\n raise NotImplementedError", "def get_wordnet_pos(word: str) -> Dict[str, Any]:\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def additional_text_preprocessing_with_pos(pos_dict):\n\n tags_to_lemmatize = ['a', 'n', 'v', 'r']\n\n pos_dict = TextPreprocessor.find_named_entities(pos_dict)\n if pos_dict is None:\n return None, None\n prepro = list()\n contains_spelling_mistake = False\n for t in pos_dict:\n token = t['token']\n tag = t['tag'].lower()\n if token not in TextPreprocessor.PUNCTUATION and tag != \",\":\n\n token = TextPreprocessor.replace_user_mentions(token)\n token = TextPreprocessor.replace_urls(token)\n replaced = [token]\n for i in replaced:\n\n i = TextPreprocessor.replace_all_punctuation(i)\n if i.lower() not in TextPreprocessor.STOPWORDS and i != 'URL' and i!= 'USERMENTION':\n if i != \"\" and not re.match('\\B#\\w*[a-zA-Z]+\\w*', i):\n before = i\n i = TextPreprocessor.SPELL_CHECKER.correct(i, tag)\n if i != before:\n contains_spelling_mistake = True\n if tag in tags_to_lemmatize:\n i = TextPreprocessor.lemmatize(i, tag)\n i = TextPreprocessor.stem(i, tag)\n # check again, since stemming, lemmatization or spelling correction can produce stopwords\n # if i.lower() not in TextPreprocessor.STOPWORDS:\n if i != 'URL' and i!= 'USERMENTION' and i!='':\n i = i.lower()\n if re.match(\".*[a-zA-Z]'\", i):\n i = i[:-1]\n prepro.append(i)\n return prepro, contains_spelling_mistake", "def extract_features(document):\n document_words = set(document)\n features = {}\n global word_features\t\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features", "def toLingDataToken(token):\n\n t = Token()\n\n t.set(\n id=token.i,\n word=token.orth_,\n lemma=token.lemma_,\n POS=token.tag_,\n SPOS=token.pos_,\n depID=token.dep,\n 
depStr=token.dep_,\n NE=token.ent_type_,\n foreign=token.is_oov\n )\n\n # setting features\n '''\n t.features = {}\n #print(t.POS)\n featureStr = translate(t.POS)\n # save string form of feature translation\n t.features['str'] = featureStr\n\n featureArr = featureStr.split(\"+\")\n #print(featureArr)\n # find the first feature\n i = 0\n while len(featureArr[i]) < 1:\n i += 1\n\n t.features['type'] = featureArr[i]\n if t.features['type'] in [\"N\"]:\n # look for number\n i += 1\n while i < len(featureArr):\n # this means it's probably a number declaration\n if len(featureArr[i]) < 4:\n t.features['number'] = featureArr[i]\n # and next feature could be type of noun\n if i + 1 < len(featureArr):\n t.features['isProper'] = featureArr[i + 1]\n break\n i += 1\n\n if t.features['type'] in [\"V\"]:\n # look for person and number\n i += 1\n while i < len(featureArr):\n # this means it's probably a person declaration\n if len(featureArr[i]) < 4:\n t.features['person'] = featureArr[i]\n # and next feature could be number\n if i + 1 < len(featureArr):\n t.features['number'] = featureArr[i + 1]\n break\n else:\n # probably a tense\n t.features['tense'] = featureArr[i]\n t.features['isParticiple'] = (\"Part\" in featureArr[i])\n\n i += 1\n #print(t.features)\n '''\n\n # setting wordType\n if token.tag_ == \"BES\": # copula\n t.set(wordType=4)\n elif token.pos_ == \"VERB\":\n t.set(wordType=1)\n elif token.pos_ == \"NOUN\" or token.pos_ == \"PROPN\":\n t.set(wordType=2)\n elif token.pos_ == \"PRON\":\n t.set(wordType=3)\n else:\n t.set(wordType=5)\n\n # spaCy does not have coreferencing...\n\n return t", "def extract_features_scope(sentence_dicts, mode='training'):\n instances = []\n sentence_splits = []\n for sent in sentence_dicts:\n if not sent['neg']:\n continue\n print(sent)\n graph = make_dir_graph_for_sentence(sent)\n bidir_graph = make_bidir_graph_for_sentence(sent)\n for cue_i, (cue, cue_position, cue_type) in enumerate(sent['cues']):\n seq_length = -1\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n features['token'] = value[3]\n features['lemma'] = value[4]\n features['pos'] = value[5]\n features['dir-dep-dist'] = get_shortest_path(graph, sent, cue_position, key)\n features['dep-graph-path'] = get_dep_graph_path(bidir_graph, sent, cue_position, key)\n\n dist = key - cue_position\n nor_index = find_nor_index(sent)\n if cue == \"neither\" and nor_index > -1 and abs(key-nor_index) < abs(dist):\n dist = key - nor_index\n #token is to the left of cue\n if dist < 0:\n if abs(dist) <= 9:\n features['left-cue-dist'] = 'A'\n else:\n features['left-cue-dist'] = 'B'\n features['right-cue-dist'] = 'null'\n #token is to the right of cue\n elif dist > 0:\n if dist <= 15:\n features['right-cue-dist'] = 'A'\n else:\n features['right-cue-dist'] = 'B'\n features['left-cue-dist'] = 'null'\n else:\n features['left-cue-dist'] = '0'\n features['right-cue-dist'] = '0'\n features['cue-type'] = cue_type\n features['cue-pos'] = sent[cue_position][5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n features['bw-bigram2'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4]\n features['bw-bigram2'] = \"%s_*\" %sent[key-1][5]\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n features['fw-bigram2'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4]\n features['fw-bigram2'] = \"*_%s\" %sent[key+1][5]\n instances.append(features)\n if key > seq_length:\n seq_length = key\n sentence_splits.append(seq_length)\n if mode == 'training':\n labels = 
extract_labels_scope(sentence_dicts, mode)\n return sentence_dicts, instances, labels, sentence_splits\n return sentence_dicts, instances, sentence_splits", "def prepocess_pos_tagged_texts(tweet_tokens):\n return [TextPreprocessor.additional_text_preprocessing_with_pos(json.loads(t)) for t in tweet_tokens]", "def extract_features_for_file(input_file, output_file, posfile):\n if not unlabeled:\n sents = read_file(input_file)\n else:\n sents = read_file_unlabeled(input_file)\n postags = get_pos_tags(posfile)\n with open(output_file,'w') as output_fileobj:\n if not unlabeled:\n for tokens,goldtags in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\\t%s\" % (goldtags[t], feats_tabsep)\n print>>output_fileobj, \"\"\n else:\n for tokens in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\" % (feats_tabsep) #for nolabels dat\n print>>output_fileobj, \"\"", "def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags", "def filter_pos(self):\n all_tokens = []\n for zettel in self.lemma_tokens:\n tokens = []\n for word in zettel:\n if word[1] in ['NN', 'NNS', 'NNP', 'NNPS', 'NG']: # NG = n_gram\n tokens.append(word)\n all_tokens.append(tokens)\n self.lemma_tokens = all_tokens", "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "def chunk_to_features(chunk, tag_method=None, posdict=None, context_feats=False):\n out_string = StringIO()\n\n num_instances = 0\n # Look for the GLOSS_POS tier\n for inst in chunk:\n gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method)\n if gpos_tier:\n num_instances += 1\n\n # For each token in the tier...\n for i, gp in enumerate(gpos_tier):\n\n if ALIGNMENT not in gp.attributes:\n continue\n\n word = gp.igt.find(id=gp.attributes[ALIGNMENT]).value()\n tag = gp.value()\n\n prev_word = None\n next_word = None\n\n if context_feats:\n if i > 0:\n prev_word = gp.igt.find(id=gpos_tier[i-1].attributes[ALIGNMENT]).value()\n\n if i < len(gpos_tier)-1:\n next_word = gp.igt.find(id=gpos_tier[i+1].attributes[ALIGNMENT]).value()\n\n\n # Write out features...\n t = GoldTagPOSToken(word, goldlabel=tag)\n write_gram(t,\n feat_prev_gram=context_feats,\n feat_next_gram=context_feats,\n prev_gram=prev_word,\n next_gram=next_word,\n lowercase=True,\n output=out_string,\n posdict=posdict)\n\n return out_string.getvalue(), num_instances", "def 
get_feature_set_SC(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = 0.0\n obj_score = 0.0\n nrof_subwords = 0\n nrof_objwords = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n sub_score = sub_score + sentimentvalues[word][0]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][1]>0:\n sub_score = sub_score + sentimentvalues[word][1]\n nrof_subwords = nrof_subwords + 1\n if sentimentvalues[word][2]>0:\n obj_score = obj_score + sentimentvalues[word][2]\n nrof_objwords = nrof_objwords + 1\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n if nrof_subwords>0:\n additional_freq[\"subjective_words\"] = nrof_subwords*1.0\n if nrof_objwords>0:\n additional_freq[\"objective_words\"] = nrof_objwords*1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def __getitem__(self, ngram):\n return self.root.__getitem__(ngram)", "def parse_ltag_from_dict(self, tree_dict):\n\n spine = re.sub('[()]', '', tree_dict['spine'])\n node_labels = spine.split()\n nodes = [SpinalLTAG(label, children=[], tree_type=tree_dict['type']) for label in node_labels]\n nodes.append(tree_dict['terminal'])\n\n for current, next in pairwise(nodes):\n current.append(next)\n\n root = nodes[0]\n root.predicate = tree_dict['predicate']\n root.roleset_id = tree_dict['roleset_id']\n root.num_args = tree_dict['num_args']\n root.tree_id = tree_dict['tree_id']\n root.parent_id = tree_dict['parent_id']\n root.parent_attach_id = tuple(tree_dict['parent_attach_id']) if tree_dict['parent_attach_id'] is not None else None\n\n # Create rules and assign them to nodes in tree\n for rule_dict in tree_dict['rules']:\n rule = Rule.from_dict(rule_dict)\n root = self.add_rule_to_tree(root, rule)\n\n return root", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': 
word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # 
'+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def _get_nouns(self, review):\n review_features = []\n for sent in review:\n doc = self.nlp(sent)\n # noun_phrase = [np.text for np in doc.noun_chunks]\n nouns = [unicode(lemma(str(word).lower())) for word in doc if word.pos == NOUN]\n review_features.append(nouns)\n return review_features", "def tree_features(self):\n return self._tree_features", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def getFeatures(c):\n\n\n feature_list = []\n lc_rc_list = []\n w1 = c.getStack(0)\n w2 = c.getStack(1)\n w3 = c.getStack(2)\n b1 = c.getBuffer(0)\n b2 = c.getBuffer(1)\n b3 = c.getBuffer(2)\n for i in [w1, w2]: #12\n lc = c.getLeftChild(i,1) # 1 st left child of the word on the stack.\n rc = c.getRightChild(i,1) # 1 st right child of the word on the stack.\n lc_rc_list.append(lc)\n lc_rc_list.append(rc)\n lc_rc_list.append(c.getLeftChild(i,2)) # 2 nd left child of the word on the stack\n lc_rc_list.append(c.getRightChild(i,2)) # 2 nd right child of the word on the stack\n lc_rc_list.append(c.getLeftChild(lc,1)) # 1 st left child of the left child of the word on the stack\n lc_rc_list.append(c.getRightChild(rc,1)) # 1 st right child of the right child of the word on the stack\n ########################### 18 Word Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getWordID(c.getWord(i))) # 6 words of the stack and buffer\n\n for i in lc_rc_list: #12 words of the tree\n feature_list.append(getWordID(c.getWord(i)))\n\n ########################### 18 Tag Features ###########################\n for i in [w1,w2,w3,b1,b2,b3]:\n\n feature_list.append(getPosID(c.getPOS(i))) # 6 tags of the owrds on the stack and the buffer\n\n for i in lc_rc_list:\n feature_list.append(getPosID(c.getPOS(i))) #12 tags of the words onthe stack and the buffer.\n ########################### 12 label Features ###########################\n for i in lc_rc_list:\n feature_list.append(getLabelID(c.getLabel(i))) #12 labels of the words on the stack and the buffer.\n\n\n return feature_list", "def token2features(sent, i, add_neighs=True):\n \n def add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = 
tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl = tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for x in word):\n ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def get_wordnet_pos(self, word):\n # token = word_tokenize(word)\n base_tag = pos_tag([word])[0][1][:2]\n return self.pos_tag_dict.get(base_tag, wordnet.NOUN)", "def FromDpcofgTree(dpcofg_tre_file):\n taxonomy_root = DndParser(dpcofg_tree.str(), Taxonomy)\n\n def fun(lineage):\n tax_node = taxonomy_root.taxNodeFromTaxon(lineage, False) #w/o root\n if tax_node is None:\n raise ValueError('Not found in taxonomy tree for lineage: %s'\n % lineage)\n return tax_node.getTaxon()[1:] #w/o root\n return fun", "def get_ngramlogprobs(freqdict):\n return", "def find_named_entities(pos_tags):\n contains_proper_noun = False\n tokens = list()\n for tags in pos_tags:\n if tags['tag'] == '^':\n contains_proper_noun = True\n\n if contains_proper_noun:\n for tags in pos_tags:\n if len(tags['token']) == 1:\n tags['token'] = NLPUtils.character_to_unicode(tags['token'])\n tokens.append(tags['token'])\n try:\n text = ' '.join(tokens)\n headers = {\n 'Accept': 'application/json',\n }\n # print(text)\n data = [\n ('text', text),\n ('confidence', '0.25'),\n ('support', '20')\n ]\n\n r = requests.post('http://model.dbpedia-spotlight.org/en/annotate', headers=headers, data=data,\n timeout=10)\n # print(str(r.content.decode()))\n res = r.json()\n\n entities = list()\n if 'Resources' in res:\n for i in res['Resources']:\n # res_str = str(i).replace(',','\\n')\n # print(res_str)\n\n if i['@types'] is not None:\n original = i['@surfaceForm']\n entity_tmp = i['@URI']\n entity_tmp = re.sub('.*/', '', entity_tmp)\n entity_tmp = re.sub('\\(.*\\)', '', entity_tmp)\n entity = re.sub('_', ' ', entity_tmp).strip()\n\n if entity.lower() in text.lower() and ' ' in entity:\n entities.append((entity, int(i['@offset'])))\n # print(entities)\n new_pos_tags = list()\n curr_pos = 0\n tokens_to_omit = 0\n for tags in pos_tags:\n # if re.match(\"U\\+[a-zA-Z0-9]{1,5}\",tags['token']):\n # print(tags['token'])\n # tags['token'] = 
NLPUtils.unicode_to_character(tags['token'])\n # print(tags['token'])\n\n token = tags['token']\n for e in entities:\n curr_dict = dict()\n if curr_pos == e[1]:\n tokens_to_omit = len(re.split(' ', e[0]))\n curr_dict['token'] = e[0]\n curr_dict['tag'] = '^'\n new_pos_tags.append(curr_dict)\n # +1 for whitespace\n curr_pos += len(token) + 1\n if tokens_to_omit == 0:\n new_pos_tags.append(tags)\n else:\n tokens_to_omit -= 1\n\n # decode unicode sequence\n new_pos_tags = NLPUtils.unicode_to_character_pos_tagged(new_pos_tags)\n return new_pos_tags\n # decode uniocde character\n pos_tags = NLPUtils.unicode_to_character_pos_tagged(pos_tags)\n except Exception as e:\n print(e)\n return None\n\n return pos_tags", "def get_nouns(root):\n nouns = []\n for child in root.findall(\"./xdrs/taggedtokens/tagtoken/tags\"):\n noun = False\n for grandchildren in child.findall(\"./tag[@type='pos']\"):\n if grandchildren.text == 'NN' or grandchildren.text == 'NNS':\n noun = True\n if noun == True:\n for grandchildren in child.findall(\"./tag[@type='lemma']\"):\n nouns.append(grandchildren.text)\n return nouns", "def label(tree):\n return tree[0]", "def extract_entities_from_dependency_parse(dtrees, postag):\n sents = []\n for x in range(0,len(dtrees)):\n tok_list = []\n for node_index in dtrees[x].nodes:\n if node_index != 0:\n node = dtrees[x].nodes[node_index]\n if node['ctag'] == postag:\n tok_list.append((node['word'],postag))\n else:\n tok_list.append((node['word'],'O'))\n sents.append(tok_list)\n return sents", "def tag(text, pos_tagger):\n features = [get_crf_features([word for word in sent]) for sent in text]\n tags = pos_tagger.predict(features)\n tagged_text = []\n for i in range(len(text)):\n tagged_sent = []\n for j in range(len(text[i])):\n tagged_sent.append((text[i][j], tags[i][j]))\n tagged_text.append(tagged_sent)\n #print(tags)\n return tags, tagged_text", "def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList", "def get_pos(sent):\n tokens = get_tokens(sent)\n return [tag for (token, tag) in nltk.pos_tag(tokens)]", "def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):\n return Text(string, token)", "def map_pos_tag(pos):\n\n\tmappings = {'NN': wn.NOUN, 'JJ': wn.ADJ, 'VB': wn.VERB, 'RB': wn.ADV}\n\tpos = pos[:2]\n\tif pos in mappings:\n\t\tpos = mappings[pos]\n\telse:\n\t\tpos = wn.NOUN\n\treturn pos", "def parse_for_pos_tagging(sentence):\n try:\n return \" \".join([token.form + \"/\" + token.upos for token in sentence])\n except TypeError: # if a POS tag is missing\n return \"\"", "def improve_tree(tree, freq_dict):\n # todo", "def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def 
get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields", "def find_features(sentence: str) -> Set[str]:\n sent_dict = set()\n sentence = _NLP(sentence)\n for token in sentence:\n # check if the word is an opinion word, then assign sentiment\n if token.text in _OPINION_WORDS:\n # if target is an adverb modifier (i.e. 
pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n if (token.dep_ == \"advmod\"):\n continue\n elif (token.dep_ == \"amod\"):\n sent_dict.add(token.head.text.lower())\n # for opinion words that are adjectives, adverbs, verbs...\n else:\n for child in token.children:\n # if verb, check if there's a direct object\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n sent_dict.add(child.text.lower())\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text == \"and\":\n conj=1\n if (conj == 1) and (subchild.text != \"and\"):\n subchildren.append(subchild.text)\n conj = 0\n for subchild in subchildren:\n sent_dict.add(subchild)\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text + \" \" + noun\n sent_dict.add(noun)\n return set(word.lower() for word in sent_dict)", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in 
featObjList]\n return featList", "def pos_tag_features(passage: str):\n pos_tags = [\"CC\", \"CD\", \"DT\", \"EX\", \"FW\", \"IN\", \"JJ\", \"JJR\", \"JJS\", \"LS\", \"MD\", \n \"NN\", \"NNS\", \"NNP\", \"NNPS\", \"PDT\", \"POS\", \"PRP\", \"RB\", \"RBR\", \"RBS\", \"RP\", \"TO\", \"UH\",\n \"VB\", \"VBD\", \"VBG\", \"VBZ\", \"WDT\", \"WP\", \"WRB\"]\n \n tags = pos_tag(word_tokenize(passage))\n tag_list= list()\n \n for tag in pos_tags:\n tag_list.append(len([i[0] for i in tags if i[1] == tag]))\n \n return tag_list", "def get_core_nouns(df):\n\tdf = df[df.pos.str.startswith(u'名詞')\n\t\t\t\t& (df.core_frequency>0)\n\t\t\t\t& (~df.lForm.str.contains(u'■')) # Words including personal info are masked by ■, and cannot be used.\n\t\t\t\t]\n\treturn df", "def _preprocess(self, feature_dict):\n return feature_dict", "def _doc2features(tokens: List[Tuple[str, str]], index: int) -> Dict:\n word, pos = tokens[index]\n f = {\n \"word\": word,\n \"word_is_stopword\": _is_stopword(word),\n \"pos\": pos,\n }\n if index > 0 and index > 1:\n prevprevword, prevprevpos = tokens[index - 2]\n f[\"prev-prev-word\"] = prevprevword\n f[\"prev-prevz-word_is_stopword\"] = _is_stopword(prevprevword)\n f[\"prev-prevz-pos\"] = prevprevpos\n if index > 0:\n prevword, prevpos = tokens[index - 1]\n f[\"prev-word\"] = prevword\n f[\"prev-word_is_stopword\"] = _is_stopword(prevword)\n f[\"prev-pos\"] = prevpos\n else:\n f[\"BOS\"] = True\n if index < len(tokens) - 2:\n nextnextword, nextnextpos = tokens[index + 2]\n f[\"nextnext-word\"] = nextnextword\n f[\"nextnext-word_is_stopword\"] = _is_stopword(nextnextword)\n f[\"nextnext-pos\"] = nextnextpos\n if index < len(tokens) - 1:\n nextword, nextpos = tokens[index + 1]\n f[\"next-word\"] = nextword\n f[\"next-word_is_stopword\"] = _is_stopword(nextword)\n f[\"next-pos\"] = nextpos\n else:\n f[\"EOS\"] = True\n\n return f", "def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. 
So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result", "def text_feature_extract(df):\n return df", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"", "def pos_tree(self) -> TemplateTree:\n return TemplateTree.parse(self.normalized_pos_tags(), self.restrictions)", "def adjust_tree(tree, args_dict = {}):\n if ((tree.category() == 'VBar') and (len(tree.children) == 2) and (tree.children[1].label.has_key('SUBCAT')) and (tree.children[1].label['SUBCAT'] == 'copula')):\n if (tree.children[0].label[feature_type] == 'DP'):\n DP = tree.children[0].label\n tree.children[0].label = FeatStructNonterminal(dict([item for item in DP.items() if (item[0] != 'PARTICLE')] + [('PARTICLE', 'pred')])) # give the DP a dummy particle\n if ((tree.category() == 'TP') and (len(tree.children) == 1)): # insert vacuous subject node\n tree.children = [SynTree(Trace(tree.children[0].ID, False), [], tree.QR_level, tree.language), tree.children[0]]\n if ((tree.category() == 'DBar') and (len(tree.children) == 1) and (tree.children[0].category() == 'NP')): # insert ambiguous determiner\n tree.children = [SynTree(FeatStructNonterminal([('PropN', False), (feature_type, 'D'), ('TRACE', False)]), [SynTree('*det*', [], tree.QR_level, tree.language)], tree.QR_level, tree.language), tree.children[0]]\n return args_dict", "def map_postags(treebank_tag):\n\n if treebank_tag.startswith('J'):\n return \"a\"\n elif treebank_tag.startswith('V'):\n return \"v\"\n elif treebank_tag.startswith('N'):\n return \"n\"\n elif treebank_tag.startswith('R'):\n return \"r\"\n else:\n return 'n'", "def get_feature_set_PC(tweet, sentimentvalues):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n features[tag] = features.get(tag, 0) + 1\n if tag in ADJECTIVES:\n features['adjectives'] = features.get(tag, 0) + 1\n elif tag in ADVERBS: \n features['adverbs'] = features.get(tag, 0) + 1\n elif tag in PRONOUNS:\n 
features['pronoun'] = 1\n except KeyError:\n continue\n for key in features.keys():\n features[key] = features[key]*1.0\n \n #Add lexical features\n # total polarity score, number of positive words, number of negative words\n pos_score = 0\n neg_score = 0\n nrof_pos_words = 0\n nrof_neg_words = 0\n for word in sentimentvalues.keys():\n if sentimentvalues[word][0]>0:\n nrof_pos_words = nrof_pos_words + 1\n pos_score = pos_score + sentimentvalues[word][0]\n if sentimentvalues[word][1]>0:\n nrof_neg_words = nrof_neg_words + 1\n neg_score = neg_score + sentimentvalues[word][1]\n\n if neg_score>0:\n features['neg_score'] = neg_score+1.0\n if pos_score>0:\n features['pos_score'] = pos_score+1.0\n if nrof_pos_words>0:\n features['positive_words'] = nrof_pos_words*1.0\n if nrof_neg_words>0:\n features['negative_words'] = nrof_neg_words*1.0\n \n return features", "def nltk_tree(sentence):\n from nltk import tree\n def do_pnp(pnp):\n # Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format.\n s = ' '.join([do_chunk(ch) for ch in pnp.chunks])\n return '(PNP %s)' % s\n \n def do_chunk(ch):\n # Returns the Chunk in NLTK bracket format. Recurse attached PNP's.\n s = ' '.join(['(%s %s)' % (w.pos, w.string) for w in ch.words])\n s+= ' '.join([do_pnp(pnp) for pnp in ch.attachments])\n return '(%s %s)' % (ch.type, s)\n \n T = ['(S']\n v = [] # PNP's already visited.\n for ch in sentence.chunked():\n if not ch.pnp and isinstance(ch, Chink):\n T.append('(%s %s)' % (ch.words[0].pos, ch.words[0].string))\n elif not ch.pnp:\n T.append(do_chunk(ch))\n #elif ch.pnp not in v:\n elif ch.pnp.anchor is None and ch.pnp not in v:\n # The chunk is part of a PNP without an anchor.\n T.append(do_pnp(ch.pnp))\n v.append(ch.pnp)\n T.append(')')\n return tree.bracket_parse(' '.join(T))", "def __init__(self):\n self.end_of_ngram = False #Flag marking whether this node is the end of an n-gram.\n self.value = None #Provided that the node marks the end of an n-gram, this refers to the value mapped by this n-gram.\n self.children = dict() #A dictionary which maps the next elements in the current path of the prefix tree to the respective node of the tree.", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= 
dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def get_wordnet_pos(pos):\n tag = pos.upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def resolve_features(grammar, mdict, parent, children):\n if ((len(children) == 1) and (isinstance(children[0], (str, unicode)))): # case where child is a lexeme\n for featstruct in mdict[children[0]]:\n pair = share_features(parent, featstruct)\n if pair:\n for word in mdict[children[0]][featstruct]:\n yield (pair[0], [word])\n elif ((len(children) in [1, 2]) and all([isinstance(child, FeatStructNonterminal) for child in children])):\n for prod in grammar.productions():\n parent2, children2 = prod.lhs(), list(prod.rhs()) # from the production rule\n parent_pair = share_features(parent, parent2)\n if parent_pair:\n if (len(children) != len(children2)): # can't be a valid production rule\n continue\n child_pairs = [share_features(children[i], children2[i]) for i in xrange(len(children))]\n if all(child_pairs):\n var_dict = dict() # maps (feature, variable) to structures, e.g. [True, [False, True]] means that it occurs in the parent and the right child\n for feature, val in parent2.items():\n if isinstance(val, Variable):\n var_dict[(feature, val)] = [True, [False for child in children2]]\n for i in xrange(len(children2)):\n for feature, val in children2[i].items():\n if isinstance(val, Variable):\n if var_dict.has_key((feature, val)):\n var_dict[(feature, val)][1][i] = True\n else:\n var_dict[(feature, val)] = [False, [(i == j) for j in xrange(len(children2))]]\n features = set([key[0] for key in var_dict.keys()])\n variables = set([key[1] for key in var_dict.keys()])\n if ((len(features) != len(var_dict)) or (len(variables) != len(var_dict))):\n raise ValueError(\"Not a one-to-one mapping between variables and features.\")\n var_val_dict = dict()\n mismatch = False\n for (feature, var), val in var_dict.items():\n values_of_var = []\n if (val[0] and (parent[feature] != var)):\n values_of_var.append(parent[feature])\n for i in xrange(len(children)):\n if (val[1][i] and (children[i][feature] != var)):\n values_of_var.append(children[i][feature])\n if (len(set(values_of_var)) > 1): \n mismatch = True\n break\n if (len(values_of_var) > 0):\n var_val_dict[var] = values_of_var[0]\n if mismatch:\n continue\n d_parent = dict(parent_pair[0].items())\n d_children = [dict(child_pair[0].items()) for child_pair in child_pairs]\n for (var, val) in var_val_dict.items():\n for d in ([d_parent] + d_children):\n for feature in d:\n if (d[feature] == var):\n d[feature] = val\n yield (FeatStructNonterminal(d_parent), [FeatStructNonterminal(d_child) for d_child in d_children])", "def nltk_tree(self):\n return nltk_tree(self)", "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), 
np.count_nonzero(np.in1d(tokens, list(pos_words)))\n neg_count=0\n pos_count=0\n for i in tokens:\n if i.lower() in neg_words:\n neg_count+=1\n if i.lower() in pos_words:\n pos_count+=1\n feats[\"neg_words\"]=neg_count\n feats[\"pos_words\"]=pos_count", "def create_feature_map(string, features):\n fmap = {}\n vec = create_vector(string)\n\n for ngram in features:\n if ngram in vec:\n fmap[ngram] = vec[ngram]\n\n return fmap", "def doc2features(self,sent):\n return [self.word2features(sent['tokens'], i) for i in range(len(sent['tokens']))]", "def get_wordnet_pos(treebank_tag):\n\n if treebank_tag == 'NNP':\n return wordnet.NOUN, 'proper'\n\n # JJ-adjective\n # JJR-adjective, comparative\n # JJS-adjective, superlative\n elif treebank_tag.startswith('J'):\n return wordnet.ADJ, 'adj'\n\n # VB-verb, base form\n # VBD-verb, past tense\n # VBG-verb, gerund or present participle; VBN-verb, past participle\n # VBP-verb, non-3rd person singular present\n # VBZ-verb, 3rd person singular present\n elif treebank_tag.startswith('V'):\n return wordnet.VERB, 'verb'\n\n # RB-adverb\n # RBR-adverb, comparative\n # RBS-adverb, superlative\n # RP-particle\n elif treebank_tag.startswith('R'):\n return wordnet.ADV, 'adv'\n\n # NN-noun\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN, 'noun'\n\n # default\n else:\n return wordnet.NOUN, ''", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name = taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def features(self, sent, position):\n if type(sent[0]) is str:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n elif sent[position].lower() in self.vocab:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][:2].lower()\n suffix = 'suff=' + sent[position][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position - 1].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n 
if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position - 2].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position + 1].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position + 2].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n else:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n elif sent[position][0].lower() in self.vocab:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][0][:2].lower()\n suffix = 'suff=' + sent[position][0][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position-1][0].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position-2][0].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position+1][0].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position+2][0].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position+2][0].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2][0].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n return fts", "def pos_text(text):\n nlp = spacy.load('en')\n doc = nlp(text)\n # all tokens that arent stop words or punctuations\n words = [token.text.encode('ascii', 'ignore') for token 
in doc if token.is_stop != True and token.is_punct != True]\n\n # noun tokens that arent stop words or punctuations\n final_tokens = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and \\\n token.is_punct != True and (token.pos_ == \"NOUN\" or token.pos_ == \"VERB\")]\n\n # frequency dictionary for all tokens\n word_freq = Counter(words)\n\n #top 100 words to display in wordcloud which are noun or verb\n #frequency will be used to show big/small words in wordcloud\n final_tokens_freq = Counter(final_tokens)\n result = final_tokens_freq.most_common(config.config[\"MAX_FREQUENCY\"])\n #print result\n return result", "def tag_ptree(ptree, coreflist):\n pattern = r\"\"\"(?P<lp>\\(?\\s*) # left parenthesis\n (?P<tg>[a-zA-Z$]+)? # POS tag\n (?P<data>\\s*%s) # subtree of tag\n (?P<rp>(?:\\s*\\))*) # right parenthesis\n \"\"\"\n for cid, coref in coreflist[::-1]:\n words = ''.join(word_tokenize(coref['text']))\n\n nltktree = Tree.parse(ptree)\n nltktree.reverse() # perform search right to left\n data = None\n for subtree in nltktree.subtrees(): # BFS\n if ''.join(subtree.leaves()) == words: # equal ignoring whitespace\n data = subtree.pprint()\n break\n\n # If found via breadth-first search of parse tree\n if data:\n ptree = ptree.replace(data, '( COREF_TAG_%s%s)' % (cid, data))\n else: # Try finding via regex matching instead\n dpattern = r'\\s*'.join([r'\\(\\s*[a-zA-Z$]+\\s+%s\\s*\\)' % word\n for word in word_tokenize(coref['text'])])\n found = re.findall(pattern % dpattern, ptree, re.X)\n if found:\n repl = '%s%s ( COREF_TAG_%s%s) %s' % (found[0][0],\n found[0][1],\n cid,\n found[0][2],\n found[0][3])\n ptree = re.sub(pattern % dpattern, repl, ptree, 1, re.X)\n\n return ptree", "def get_feature_set_PC2(tweet, sentimentvalues):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n features[tag] = features.get(tag, 0) + 1\n if tag in ADJECTIVES:\n features['adjectives'] = features.get(tag, 0) + 1\n elif tag in ADVERBS: \n features['adverbs'] = features.get(tag, 0) + 1\n elif tag in PRONOUNS:\n features['pronoun'] = 1\n except KeyError:\n continue\n for key in features.keys():\n features[key] = features[key]*1.0\n \n #Add lexical features\n # total polarity score, number of positive words, number of negative words\n pos_score = sentimentvalues[0]\n neg_score = sentimentvalues[1]\n\n if pos_score>0:\n features['pos_score'] = pos_score+1.0\n if neg_score>0:\n features['neg_score'] = neg_score+1.0\n \n return features", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]", "def features(self, sentence, tags, index):\n return{\n 'word': sentence[ index ],\n 'prevWord': '' if index == 0 else sentence[ index - 1 ],\n 'nextWord': '' if index == len( sentence ) -1 else 
sentence[ index + 1 ],\n 'isFirst': index == 0,\n 'isLast': index == len( sentence ) - 1,\n 'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],\n 'isAllCaps': sentence[ index ].upper() == sentence[ index ],\n 'isAllLowers': sentence[ index ].lower() == sentence[ index ],\n 'prefix-1': sentence[ index ][ 0 ],\n 'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],\n 'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],\n 'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],\n 'suffix-1': sentence[ index ][ -1 ],\n 'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],\n 'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],\n 'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],\n 'tag-1': '' if index == 0 else tags[ index - 1 ],\n 'tag-2': '' if index < 2 else tags[ index - 2 ]\n }", "def _extract_opinions(self):\n self.data['adjectives'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADJ))\n self.data['adverbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADV))\n self.data['verbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, VERB))", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)", "def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):\n if allowPOS:\n # 參考[Python frozenset()](https://www.programiz.com/python-programming/methods/built-in/frozenset)\n # The frozenset() method returns an immutable frozenset object \n # initialized with elements from the given iterable.\n allowPOS = frozenset(allowPOS)\n # words為generator of pair(pair類別定義於jieba/posseg/__init__.py檔)\n # 其中pair類別的物件具有word及flag(即詞性)兩個屬性\n words = self.postokenizer.cut(sentence)\n else:\n # words為generator of str\n words = self.tokenizer.cut(sentence)\n # 計算詞頻(即TF,term frequency)\n freq = {}\n for w in words:\n if allowPOS:\n # 僅選取詞性存在於allowPOS中的詞\n if w.flag not in allowPOS:\n continue\n # 僅回傳詞彙本身\n elif not withFlag:\n w = w.word\n # 在allowPOS及withFlag皆為True的情況下,從w中取出詞彙本身,設為wc\n # 如果不符上述情況,則直接將wc設為w\n wc = w.word if allowPOS and withFlag else w\n if len(wc.strip()) < 2 or wc.lower() in self.stop_words:\n #略過長度小於等於1的詞及停用詞?\n continue\n freq[w] = freq.get(w, 0.0) + 1.0\n # 所有詞頻的總和\n total = sum(freq.values())\n # 將詞頻(TF)乘上逆向文件頻率(即IDF,inverse document frequency)\n for k in freq:\n kw = k.word if allowPOS and withFlag else k\n # 如果idf_freq字典中未記錄該詞,則以idf的中位數替代\n freq[k] *= self.idf_freq.get(kw, self.median_idf) / total\n # 現在freq變為詞彙出現機率乘上IDF\n\n if withWeight:\n # 回傳詞彙本身及其TF-IDF\n # itemgetter(1)的參數是鍵值對(因為是sorted(freq.items()))\n # 它回傳tuple的第1個元素(index從0開始),即字典的值\n # 所以sorted會依value來排序\n # reverse=True:由大至小排列\n tags = sorted(freq.items(), key=itemgetter(1), reverse=True)\n else:\n # 僅回傳詞彙本身\n # freq.__getitem__的參數是字典的鍵(因為是sorted(freq))\n # 它回傳的是字典的值,所達到的效用是sort by value\n tags = sorted(freq, key=freq.__getitem__, reverse=True)\n if topK:\n # 僅回傳前topK個\n return tags[:topK]\n else:\n return tags", "def get_wordnet_pos(treebank_tag):\n if treebank_tag.startswith('J'):\n return wordnet.ADJ\n elif treebank_tag.startswith('V'):\n return wordnet.VERB\n elif treebank_tag.startswith('N'):\n return wordnet.NOUN\n elif treebank_tag.startswith('R'):\n return wordnet.ADV\n else:\n # As default pos in lemmatization is Noun\n return wordnet.NOUN", "def _get_bag_of_pos_ngram(words, index, 
window_size, N):\n bos = DummyWord(pos=utils.BEGIN_OF_SENTENCE, upos=utils.BEGIN_OF_SENTENCE, dependency_relation=utils.BEGIN_OF_SENTENCE)\n eos = DummyWord(pos=utils.END_OF_SENTENCE, upos=utils.END_OF_SENTENCE, dependency_relation=utils.END_OF_SENTENCE)\n words = [bos] * (window_size + N) + words + [eos] * (window_size + N)\n index += (window_size + N)\n return [\n \"_\".join([_get_word_feature(w) for w in words[i:i+N]])\n for i in range(index-window_size, index+window_size+1)]", "def cstree_predict(value_dict, tree, order, sample, i, data):\n numerator = cstree_likelihood(sample, order, tree, data)\n order_of_var = order.index(i)\n samples = [np.array(list(sample[:order_of_var])+[val]+list(sample[order_of_var+1:]))\n for val in value_dict[i]]\n # sum_j P(X1=x1,...,Xi-1=xi-1,Xi+1=xi+1,...,Xn|Xi=j)P(Xi=j)\n denominator = sum([cstree_likelihood(s, order, tree, data) for s in samples])\n return numerator/denominator", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def get_ngrams(stats,s,t,i):\n #lemma ngrams\n ngram_sizes = [\"bi\", \"tri\"]\n for ngram_size in ngram_sizes:\n lm_ngram = get_lemma_ngrams(s, t, i, ngram_size)\n if lm_ngram:\n put_feature_value_list(stats,\"lemma_\" + ngram_size + \"gr\", lm_ngram)\n\n #POS and deprel bigrams\n if i < s.length-1:\n put_feature_value_list(stats,\"deprels_bigr\", (t.deprel,s.nodes[i+1].deprel))\n put_feature_value_list(stats,\"pos_bigr\", (t.pos,s.nodes[i+1].pos))\n \n #POS and deprel trigrams\n if i < s.length-2:\n put_feature_value_list(stats,\"deprels_trigr\", (t.deprel, s.nodes[i+1].deprel, s.nodes[i+2].deprel))\n put_feature_value_list(stats,\"pos_trigr\", (t.pos, s.nodes[i+1].pos, s.nodes[i+2].pos))\n\n return stats", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n\n return tag_dict.get(tag, wordnet.NOUN)", "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n 
label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def get_ngram_features(train_data, test_data):\n print(\"getting ngram features\")\n ngram_vectorizer = CountVectorizer(ngram_range = (1, 2))\n ngram_vectorizer = ngram_vectorizer.fit(train_data)\n return ngram_vectorizer.transform(train_data), ngram_vectorizer.transform(test_data)", "def processFeature(prevWord, word, vector):\n \n # We add feature whether it exists or not\n unigram, exists = vector.getUnigram(prevWord)\n if not exists:\n vector.addUnigram(prevWord)\n \n \n bigram, exists = vector.getBigram(prevWord, word)\n if not exists:\n vector.addBigram(prevWord, word)", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def preprocess(self, sequence, word2idx):\n context = [word2idx[word]\n if word in word2idx else UNK_token for word in sequence.split()]\n context = torch.Tensor(context)\n return context" ]
[ "0.6383279", "0.6254126", "0.61910975", "0.6125874", "0.5859489", "0.576097", "0.5751471", "0.57465875", "0.5686106", "0.5654718", "0.5606234", "0.55766785", "0.5484626", "0.54041994", "0.5392085", "0.53891397", "0.53711605", "0.53593254", "0.53128034", "0.5308846", "0.5308494", "0.53083843", "0.52953917", "0.52748525", "0.52700764", "0.52683014", "0.5249804", "0.52264065", "0.5225112", "0.5210038", "0.5203358", "0.5190545", "0.5173251", "0.5159667", "0.51594174", "0.5154774", "0.51327413", "0.51287", "0.5121672", "0.5118072", "0.5113387", "0.5110904", "0.5108216", "0.51008373", "0.5098176", "0.5096545", "0.5092731", "0.509151", "0.50912195", "0.5088825", "0.50870883", "0.5085602", "0.50846976", "0.5082889", "0.5080889", "0.5073862", "0.5072913", "0.5072208", "0.5060459", "0.50599307", "0.50591546", "0.5056473", "0.50557166", "0.50533736", "0.50497884", "0.5047759", "0.50464606", "0.5039027", "0.5030843", "0.50218093", "0.50212497", "0.50147164", "0.5013709", "0.5012236", "0.5005748", "0.5004671", "0.5001592", "0.4993279", "0.49874905", "0.49872726", "0.49831432", "0.49815193", "0.4980691", "0.4980157", "0.49775335", "0.49750897", "0.49734664", "0.49645635", "0.495955", "0.49578807", "0.49578807", "0.49578807", "0.49578807", "0.49578807", "0.49578807", "0.49575278", "0.4955529", "0.49530998", "0.49437052", "0.49403524" ]
0.52241606
29
Context processor to fetch community stats from Django people and Django packages. This caches the resulting dictionary to lower the chance of overwhelming those services.
def community_stats(request):
    stats = cache.get(STATS_CACHE_KEY, None)
    if not stats:
        stats = fetch(PEOPLE_STATS_URL)
        packages_data = fetch(PACKAGES_STATS_URL)
        if 'meta' in packages_data:
            stats.update({'packages': packages_data['meta']['total_count']})
        stats = {'community_stats': stats}
        cache.add(STATS_CACHE_KEY, stats, 60 * 60 * 12)  # for half a day
    return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def each_context(self, request):\n script_name = request.META['SCRIPT_NAME']\n site_url = script_name if self.site_url == '/' and script_name else self.site_url\n # 把用户的groupID传给template\n # if Group.objects.filter(id = request.user.id):\n # group_context = Group.objects.get(id = request.user.id).id\n # else:\n # group_context = 0\n # group_context = [0, ]\n # if not isinstance(request.user,AnonymousUser):\n try:\n groups = Group.objects.filter(user=request.user)\n if groups:\n group_context = [i.name for i in groups]\n else:\n if request.user.is_superuser:\n group_context = [1, ]\n else:\n # 此类用户没有分组\n group_context = [0, ]\n except:\n group_context = [0, ]\n\n return {\n 'site_title': self.site_title,\n 'site_header': self.site_header,\n 'site_url': site_url,\n 'has_permission': self.has_permission(request),\n 'group_id': group_context,\n 'available_apps': self.get_app_list(request),\n }", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... 
Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def make_common_context(self):\r\n\r\n self.datatable = {}\r\n\r\n self.datatable = dict(header=[_('Statistic'), _('Value')],\r\n title=_('Site statistics'))\r\n self.datatable['data'] = [[_('Total number of users'),\r\n User.objects.all().count()]]\r\n\r\n self.msg += u'<h2>{0}</h2>'.format(\r\n _('Courses loaded in the modulestore'))\r\n self.msg += u'<ol>'\r\n for course in self.get_courses():\r\n self.msg += u'<li>{0} ({1})</li>'.format(\r\n escape(course.id.to_deprecated_string()), course.location.to_deprecated_string())\r\n self.msg += u'</ol>'", "def common_context(request):\n c = {\n 'lessons': get_lesson_numbers(),\n }\n return c", "def achieve_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contrib_list = []\n article_list = []\n gsoc_list = []\n speaker_list = []\n intern_list = []\n contest_participant_list = []\n icpc_participants_regional_list = []\n icpc_participants_final_list = []\n\n contrib_list_all = Contribution.objects.all()\n contrib_list = Contribution.objects.all()[:5]\n article_list = Article.objects.all()[:5]\n gsoc_list = Gsoc.objects.all()[:5]\n speaker_list = Speaker.objects.all()[:5]\n intern_list = Intern.objects.all()[:5]\n contest_list = Contest_won.objects.all()[:5]\n\n \n contrib_org = {}\n if contrib_list_all:\n for contrib in contrib_list_all:\n if contrib.org_name 
not in contrib_org.keys():\n contrib_org[contrib.org_name] = 0\n\n for contrib in contrib_list:\n contrib_org[contrib.org_name] += 1\n\n if contest_list:\t\n contest_participant_list = []\n\tfor contest_won_obj in contest_list:\t\n\t c_id = contest_won_obj.contest_id\n\t c_p_objs = Contest_won_participant.objects.filter(contest_id = c_id)\n\t contest_participant_list.extend(c_p_objs)\n \n icpc_list_regionals = ACM_ICPC_detail.objects.filter(level='regional').order_by('ranking')[:2]\n if icpc_list_regionals:\n for icpc_obj in icpc_list_regionals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_regional_list.append(icpc_participant_list)\n\n icpc_list_finals = ACM_ICPC_detail.objects.filter(level='finals').order_by('ranking')[:2]\n if icpc_list_finals:\n for icpc_obj in icpc_list_finals:\n team = icpc_obj.team_name\n member1 = [icpc_obj.participant1_name, \\\n get_username_from_email(icpc_obj.participant1_email)]\n\n member2 = [icpc_obj.participant2_name, \\\n get_username_from_email(icpc_obj.participant2_email)]\n\n member3 = [icpc_obj.participant3_name, \\\n get_username_from_email(icpc_obj.participant3_email)]\n\n icpc_participant_list = [icpc_obj, member1,member2,member3]\n icpc_participants_final_list.append(icpc_participant_list)\n\n return render_to_response('achievement/achievement_viewall.html',\\\n\t\t{'username':username, \\\n 'is_loggedin':is_loggedin, \\\n 'contrib_list':contrib_list, \\\n 'contrib_org':contrib_org,\\\n 'article_list':article_list, \\\n 'gsoc_list':gsoc_list, \\\n 'speaker_list':speaker_list, \\\n 'intern_list':intern_list, \\\n 'contest_list':contest_list, \\\n 'contest_participant_list':contest_participant_list, \\\n 'icpc_participants_final_list':icpc_participants_final_list, \\\n 'icpc_participants_regional_list':icpc_participants_regional_list}, \\\n RequestContext(request))", "def getContext(self):\n context = {}\n service = backendservices(self.user_data)\n # StochKit jobs\n all_stochkit_jobs = db.GqlQuery(\"SELECT * FROM StochKitJobWrapper WHERE user_id = :1\", self.user.user_id())\n all_jobs = []\n if all_stochkit_jobs != None:\n jobs = list(all_stochkit_jobs.run())\n jobs = sorted(jobs,\n key=lambda x:\n (datetime.datetime.strptime(x.startTime, '%Y-%m-%d-%H-%M-%S')\n if hasattr(x, 'startTime') and x.startTime != None else datetime.datetime.now()),\n reverse=True)\n for number, job in enumerate(jobs):\n number = len(jobs) - number\n all_jobs.append(self.__process_getJobStatus(service,job, number))\n context['all_jobs']=all_jobs\n\n # Sensitivity\n allSensJobs = []\n allSensQuery = db.GqlQuery(\"SELECT * FROM SensitivityJobWrapper WHERE user_id = :1\", self.user.user_id())\n if allSensQuery != None:\n jobs = list(allSensQuery.run())\n jobs = sorted(jobs,\n key=lambda x:\n (datetime.datetime.strptime(x.startTime, '%Y-%m-%d-%H-%M-%S')\n if hasattr(x, 'startTime') and x.startTime != None else ''),\n reverse = True)\n for number, job in enumerate(jobs):\n number = len(jobs) - number\n allSensJobs.append(self.__process_getJobStatus(service,job, number))\n context['allSensJobs']=allSensJobs\n\n\n # Export\n allExportJobs = []\n exportJobsQuery = db.GqlQuery(\"SELECT * FROM ExportJobWrapper WHERE 
user_id = :1\", self.user.user_id())\n if exportJobsQuery != None:\n jobs = list(exportJobsQuery.run())\n jobs = sorted(jobs, key = lambda x : (datetime.datetime.strptime(x.startTime, '%Y-%m-%d-%H-%M-%S') if hasattr(x, 'startTime') and x.startTime != None else ''), reverse = True)\n for number, job in enumerate(jobs):\n number = len(jobs) - number\n allExportJobs.append({ \"startTime\" : job.startTime,\n \"status\" : job.status,\n \"number\" : number,\n \"outData\" : os.path.basename(job.outData if job.outData else \"\"),\n \"id\" : job.key().id()})\n context['allExportJobs'] = allExportJobs\n\n # Parameter Estimation\n allParameterJobs = []\n allParameterJobsQuery = db.GqlQuery(\"SELECT * FROM StochOptimJobWrapper WHERE user_id = :1\", self.user.user_id())\n if allParameterJobsQuery != None:\n jobs = list(allParameterJobsQuery.run())\n jobs = sorted(jobs, key = lambda x : (datetime.datetime.strptime(x.startTime, '%Y-%m-%d-%H-%M-%S') if hasattr(x, 'startTime') and x.startTime != None else ''), reverse = True)\n for number, job in enumerate(jobs):\n number = len(jobs) - number\n allParameterJobs.append(self.__process_getJobStatus(service,job, number))\n context['allParameterJobs'] = allParameterJobs\n\n #Spatial Jobs\n allSpatialJobs = []\n allSpatialJobsQuery = db.GqlQuery(\"SELECT * FROM SpatialJobWrapper WHERE user_id = :1\", self.user.user_id())\n if allSpatialJobsQuery != None:\n jobs = list(allSpatialJobsQuery.run())\n jobs = sorted(jobs,\n key=lambda x : (datetime.datetime.strptime(x.startTime, '%Y-%m-%d-%H-%M-%S')\n if hasattr(x, 'startTime') and x.startTime != None else ''),\n reverse = True)\n for number, job in enumerate(jobs):\n number = len(jobs) - number\n allSpatialJobs.append(self.__process_getJobStatus(service,job, number))\n context['allSpatialJobs'] = allSpatialJobs\n \n return context", "def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n context['list_result'] = self.queryset\n get_student_ranking(context)\n get_college_ranking(context)\n return context", "def get_context(self, request, *args, **kwargs):\n program_page_qset = (\n ProgramPage.objects.live()\n .filter(program__live=True)\n .order_by(\"id\")\n .select_related(\"program\", \"thumbnail_image\")\n .prefetch_related(\"program__courses__courseruns\")\n )\n course_page_qset = (\n CoursePage.objects.live()\n .filter(course__live=True)\n .order_by(\"id\")\n .select_related(\"course\", \"thumbnail_image\")\n .prefetch_related(\"course__courseruns\")\n )\n external_course_qset = (\n ExternalCoursePage.objects.live()\n .order_by(\"title\")\n .select_related(\"thumbnail_image\")\n )\n\n external_program_qset = (\n ExternalProgramPage.objects.live()\n .order_by(\"title\")\n .select_related(\"thumbnail_image\")\n )\n\n featured_product = program_page_qset.filter(\n featured=True\n ) or course_page_qset.filter(featured=True)\n all_pages, program_pages, course_pages = filter_and_sort_catalog_pages(\n program_page_qset,\n course_page_qset,\n external_course_qset,\n external_program_qset,\n )\n return dict(\n **super().get_context(request),\n **get_base_context(request),\n all_pages=all_pages,\n program_pages=program_pages,\n course_pages=course_pages,\n featured_product=featured_product.first(),\n default_image_path=DEFAULT_COURSE_IMG_PATH,\n hubspot_portal_id=settings.HUBSPOT_CONFIG.get(\"HUBSPOT_PORTAL_ID\"),\n hubspot_new_courses_form_guid=settings.HUBSPOT_CONFIG.get(\n \"HUBSPOT_NEW_COURSES_FORM_GUID\"\n ),\n )", "def minority_aggregation_as_json(request):\n msa_target_lma_sum 
= 0\n msa_target_mma_sum = 0\n msa_target_hma_sum = 0\n\n msa_peer_lma_sum = 0\n msa_peer_mma_sum = 0\n msa_peer_hma_sum = 0\n\n\n msa_stats = {}\n\n lar_data = loan_originations_as_json(request)\n lender = get_object_or_404(Institution, pk=request.GET.get('lender'))\n metro = get_object_or_404(Geo, geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n peer_request = HttpRequest()\n peer_request.GET['lender'] = lender.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_request.GET['peers'] = 'true'\n peer_lar_data = loan_originations_as_json(peer_request)\n\n msa_counties = Geo.objects.filter(geo_type=Geo.COUNTY_TYPE, cbsa=metro.cbsa, year=metro.year)\n county_stats = {}\n for county in msa_counties:\n county_tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, state=county.state, county=county.county, year=metro.year)\n minority_area_stats = get_minority_area_stats(lar_data, peer_lar_data, county_tracts)\n county_stats[county.geoid] = assemble_stats(*minority_area_stats)\n county_stats[county.geoid]['name'] = county.name\n #tally target msa counts\n msa_target_lma_sum += county_stats[county.geoid]['lma']\n msa_target_mma_sum += county_stats[county.geoid]['mma']\n msa_target_hma_sum += county_stats[county.geoid]['hma']\n #tally peer msa counts\n msa_peer_lma_sum += county_stats[county.geoid]['peer_lma']\n msa_peer_mma_sum += county_stats[county.geoid]['peer_mma']\n msa_peer_hma_sum += county_stats[county.geoid]['peer_hma']\n #msa\n msa_minority_area_stats = (msa_target_lma_sum, msa_target_mma_sum, msa_target_hma_sum, msa_peer_lma_sum, msa_peer_mma_sum, msa_peer_hma_sum)\n msa_stats = assemble_stats(*msa_minority_area_stats)\n \n return {\n 'msa': msa_stats,\n 'counties': county_stats,\n }", "def userstats(request):\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n repositories = Repository.objects.select_related(\n 'organization',\n ).filter(\n is_visible=True,\n contribution__contributor=self.object,\n ).annotate(\n commits=Count('id', filter=Q(contribution__type='cit')),\n additions=Coalesce(Sum('contribution__stats__additions'), 0),\n deletions=Coalesce(Sum('contribution__stats__deletions'), 0),\n pull_requests=Count(\n 'contribution', filter=Q(contribution__type='pr'),\n ),\n issues=Count('contribution', filter=Q(contribution__type='iss')),\n comments=Count('contribution', filter=Q(contribution__type='cnt')),\n ).order_by('organization', 'name')\n\n context['repositories'] = repositories\n context['contributions_for_year'] = (\n self.object.contribution_set.for_year()\n )\n return context", "def GatherBaseData(self, mr, nonce):\n project = mr.project\n\n project_summary = ''\n project_alert = None\n project_read_only = False\n project_home_page = ''\n project_thumbnail_url = ''\n if project:\n project_summary = project.summary\n project_alert = _CalcProjectAlert(project)\n project_read_only = project.read_only_reason\n project_home_page = project.home_page\n project_thumbnail_url = tracker_views.LogoView(project).thumbnail_url\n\n with work_env.WorkEnv(mr, self.services) as we:\n is_project_starred = False\n project_view = None\n if mr.project:\n if permissions.UserCanViewProject(\n mr.auth.user_pb, mr.auth.effective_ids, mr.project):\n is_project_starred = we.IsProjectStarred(mr.project_id)\n # TODO(jrobbins): should this be a ProjectView?\n project_view = 
template_helpers.PBProxy(mr.project)\n\n grid_x_attr = None\n grid_y_attr = None\n hotlist_view = None\n if mr.hotlist:\n users_by_id = framework_views.MakeAllUserViews(\n mr.cnxn, self.services.user,\n features_bizobj.UsersInvolvedInHotlists([mr.hotlist]))\n hotlist_view = hotlist_views.HotlistView(\n mr.hotlist, mr.perms, mr.auth, mr.viewed_user_auth.user_id,\n users_by_id, self.services.hotlist_star.IsItemStarredBy(\n mr.cnxn, mr.hotlist.hotlist_id, mr.auth.user_id))\n grid_x_attr = mr.x.lower()\n grid_y_attr = mr.y.lower()\n\n app_version = os.environ.get('CURRENT_VERSION_ID')\n\n viewed_username = None\n if mr.viewed_user_auth.user_view:\n viewed_username = mr.viewed_user_auth.user_view.username\n\n issue_entry_url = 'entry'\n config = None\n if mr.project_id and self.services.config:\n with mr.profiler.Phase('getting config'):\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n grid_x_attr = (mr.x or config.default_x_attr).lower()\n grid_y_attr = (mr.y or config.default_y_attr).lower()\n issue_entry_url = _LoginOrIssueEntryURL(mr, config)\n\n viewing_self = mr.auth.user_id == mr.viewed_user_auth.user_id\n offer_saved_queries_subtab = (\n viewing_self or mr.auth.user_pb and mr.auth.user_pb.is_site_admin)\n\n login_url = _SafeCreateLoginURL(mr)\n logout_url = _SafeCreateLogoutURL(mr)\n logout_url_goto_home = users.create_logout_url('/')\n version_base = _VersionBaseURL(mr.request)\n\n base_data = {\n # EZT does not have constants for True and False, so we pass them in.\n 'True': ezt.boolean(True),\n 'False': ezt.boolean(False),\n\n 'local_mode': ezt.boolean(settings.local_mode),\n\n 'site_name': settings.site_name,\n 'show_search_metadata': ezt.boolean(False),\n 'page_template': self._PAGE_TEMPLATE,\n 'main_tab_mode': self._MAIN_TAB_MODE,\n 'project_summary': project_summary,\n 'project_home_page': project_home_page,\n 'project_thumbnail_url': project_thumbnail_url,\n\n 'hotlist_id': mr.hotlist_id,\n 'hotlist': hotlist_view,\n\n 'hostport': mr.request.host,\n 'absolute_base_url': '%s://%s' % (mr.request.scheme, mr.request.host),\n 'project_home_url': None,\n 'link_rel_canonical': None, # For specifying <link rel=\"canonical\">\n 'projectname': mr.project_name,\n 'project': project_view,\n 'project_is_restricted': ezt.boolean(_ProjectIsRestricted(mr)),\n 'offer_contributor_list': ezt.boolean(\n permissions.CanViewContributorList(mr, mr.project)),\n 'logged_in_user': mr.auth.user_view,\n 'form_token': None, # Set to a value below iff the user is logged in.\n 'form_token_path': None,\n 'token_expires_sec': None,\n 'xhr_token': None, # Set to a value below iff the user is logged in.\n 'flag_spam_token': None,\n 'nonce': nonce,\n 'perms': mr.perms,\n 'warnings': mr.warnings,\n 'errors': mr.errors,\n\n 'viewed_username': viewed_username,\n 'viewed_user': mr.viewed_user_auth.user_view,\n 'viewed_user_pb': template_helpers.PBProxy(\n mr.viewed_user_auth.user_pb),\n 'viewing_self': ezt.boolean(viewing_self),\n 'viewed_user_id': mr.viewed_user_auth.user_id,\n 'offer_saved_queries_subtab': ezt.boolean(offer_saved_queries_subtab),\n\n 'currentPageURL': mr.current_page_url,\n 'currentPageURLEncoded': mr.current_page_url_encoded,\n 'login_url': login_url,\n 'logout_url': logout_url,\n 'logout_url_goto_home': logout_url_goto_home,\n 'continue_issue_id': mr.continue_issue_id,\n 'feedback_email': settings.feedback_email,\n 'category_css': None, # Used to specify a category of stylesheet\n 'category2_css': None, # specify a 2nd category of stylesheet if needed.\n 'page_css': None, 
# Used to add a stylesheet to a specific page.\n\n 'can': mr.can,\n 'query': mr.query,\n 'colspec': None,\n 'sortspec': mr.sort_spec,\n\n # Options for issuelist display\n 'grid_x_attr': grid_x_attr,\n 'grid_y_attr': grid_y_attr,\n 'grid_cell_mode': mr.cells,\n 'grid_mode': None,\n 'list_mode': None,\n 'chart_mode': None,\n\n 'issue_entry_url': issue_entry_url,\n 'is_cross_project': ezt.boolean(False),\n\n # for project search (some also used in issue search)\n 'start': mr.start,\n 'num': mr.num,\n 'groupby': mr.group_by_spec,\n 'q_field_size': (\n min(framework_constants.MAX_ARTIFACT_SEARCH_FIELD_SIZE,\n max(framework_constants.MIN_ARTIFACT_SEARCH_FIELD_SIZE,\n len(mr.query) + framework_constants.AUTOSIZE_STEP))),\n 'mode': None, # Display mode, e.g., grid mode.\n 'ajah': mr.ajah,\n 'table_title': mr.table_title,\n\n 'alerts': alerts.AlertsView(mr), # For alert.ezt\n 'project_alert': project_alert,\n\n 'title': None, # First part of page title\n 'title_summary': None, # Appended to title on artifact detail pages\n\n # TODO(jrobbins): make sure that the templates use\n # project_read_only for project-mutative actions and if any\n # uses of read_only remain.\n 'project_read_only': ezt.boolean(project_read_only),\n 'site_read_only': ezt.boolean(settings.read_only),\n 'banner_time': servlet_helpers.GetBannerTime(settings.banner_time),\n 'read_only': ezt.boolean(settings.read_only or project_read_only),\n 'site_banner_message': settings.banner_message,\n 'robots_no_index': None,\n 'analytics_id': settings.analytics_id,\n\n 'is_project_starred': ezt.boolean(is_project_starred),\n\n 'version_base': version_base,\n 'app_version': app_version,\n 'gapi_client_id': settings.gapi_client_id,\n 'viewing_user_page': ezt.boolean(False),\n 'old_ui_url': None,\n\n 'is_member': ezt.boolean(False),\n }\n\n if mr.project:\n base_data['project_home_url'] = '/p/%s' % mr.project_name\n\n # Always add xhr-xsrf token because even anon users need some\n # pRPC methods, e.g., autocomplete, flipper, and charts.\n base_data['token_expires_sec'] = xsrf.TokenExpiresSec()\n base_data['xhr_token'] = xsrf.GenerateToken(\n mr.auth.user_id, xsrf.XHR_SERVLET_PATH)\n # Always add other anti-xsrf tokens when the user is logged in.\n if mr.auth.user_id:\n form_token_path = self._FormHandlerURL(mr.request.path)\n base_data['form_token'] = xsrf.GenerateToken(\n mr.auth.user_id, form_token_path)\n base_data['form_token_path'] = form_token_path\n\n return base_data", "def get_context_data(self, **kwargs):\n context = super(ProjectView, self).get_context_data(**kwargs)\n # only load if self.request.user == founder\n if self.request.user.id is self.get_object().founder.id:\n context['match_list'] = Match.objects.filter(project=self.object.id).order_by('-rank')\n return context", "def stats(self):\r\n\t\tdata = self._get('global/', query=None)\r\n\t\treturn data", "def _get_communities(self):\n return self.__communities", "def _context(self):\n domain = Site.objects.get_current()\n scheme = 'http' if settings.DEBUG else 'https'\n return {\n 'event': self.event,\n 'documents': self.documents.all(),\n 'mchp_base_url': '{}://{}'.format(scheme, domain)\n }", "def index():\r\n\r\n title = \"Global Statistics\"\r\n\r\n n_auth = n_auth_users()\r\n\r\n n_anon = n_anon_users()\r\n\r\n n_total_users = n_anon + n_auth\r\n\r\n n_published_apps = cached_apps.n_published()\r\n n_draft_apps = cached_apps.n_draft()\r\n n_total_apps = n_published_apps + n_draft_apps\r\n\r\n n_tasks = n_tasks_site()\r\n\r\n n_task_runs = n_task_runs_site()\r\n\r\n 
top5_apps_24_hours = get_top5_apps_24_hours()\r\n\r\n top5_users_24_hours = get_top5_users_24_hours()\r\n\r\n locs = get_locs()\r\n\r\n show_locs = False\r\n if len(locs) > 0:\r\n show_locs = True\r\n\r\n stats = dict(n_total_users=n_total_users, n_auth=n_auth, n_anon=n_anon,\r\n n_published_apps=n_published_apps,\r\n n_draft_apps=n_draft_apps,\r\n n_total_apps=n_total_apps,\r\n n_tasks=n_tasks,\r\n n_task_runs=n_task_runs)\r\n\r\n users = dict(label=\"User Statistics\",\r\n values=[\r\n dict(label='Anonymous', value=[0, n_anon]),\r\n dict(label='Authenticated', value=[0, n_auth])])\r\n\r\n apps = dict(label=\"Apps Statistics\",\r\n values=[\r\n dict(label='Published', value=[0, n_published_apps]),\r\n dict(label='Draft', value=[0, n_draft_apps])])\r\n\r\n tasks = dict(label=\"Task and Task Run Statistics\",\r\n values=[\r\n dict(label='Tasks', value=[0, n_tasks]),\r\n dict(label='Answers', value=[1, n_task_runs])])\r\n\r\n return render_template('/stats/global.html', title=title,\r\n users=json.dumps(users),\r\n apps=json.dumps(apps),\r\n tasks=json.dumps(tasks),\r\n locs=json.dumps(locs),\r\n show_locs=show_locs,\r\n top5_users_24_hours=top5_users_24_hours,\r\n top5_apps_24_hours=top5_apps_24_hours,\r\n stats=stats)", "def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }", "def statistics(self, request):\n context = {\"uid\": request.session.get(\"uid\", None),\n \"is_auth\": request.session.get(\"is_auth\", False),\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n }\n\n context.update({\"statistics\": self.utils.get_statistics()})\n # \"dates\": self.utils.get_artefacts_dates()})\n return render_to_response(\"crisis/statistics.html\",\n context,\n context_instance=RequestContext(request))", "def get_context_data(self, **kwargs):\n context = super(IndexView, self).get_context_data(**kwargs)\n context['questions'] = models.Question.objects.count()\n context['answers'] = models.Answer.objects.count()\n context['users'] = User.objects.count()\n return context", "def stats(request):\n stats = []\n activeProject = None\n activity = get_activity()\n if activity:\n activeProject = activity.project\n projects = Project.objects.filter(company__id=1).order_by('name')\n for project in projects:\n isCurrent = (activeProject != None) and (project.id == activeProject.id)\n # If this is NOT the currently selected project...\n if (not isCurrent) or (not activeProject):\n # If this project is password protected, skip it.\n if (project.password != None) and (len(project.password) > 0):\n continue\n sessions = Session.objects.filter(project=project,\n endtime__gt=F('starttime') +\n timedelta(minutes=3))\n files = File.objects.filter(project=project)\n fileactions = Fileaction.objects.filter(file__in=files)\n events = Event.objects.filter(session__in=sessions)\n sQuery = {\n 'avg': 'SUM(TIMESTAMPDIFF(SECOND, starttime, endtime)) / COUNT(*)',\n 'min': 'MIN(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'max': 'MAX(TIMESTAMPDIFF(SECOND, starttime, endtime))',\n 'count': 'COUNT(*)'\n }\n sessions = sessions.extra(select=sQuery)\n sessions = sessions.values_list('avg', 'min', 'max', 'count').get()\n session_average_duration = 0\n session_min_duration = 0\n session_max_duration = 0\n if sessions[0] is not None:\n session_average_duration = int(sessions[0])\n if sessions[1] is not None:\n session_min_duration = int(sessions[1])\n if sessions[2] is not None:\n session_max_duration = int(sessions[2])\n session_count = 
sessions[3]\n statsdata = {\n 'selected': isCurrent,\n 'name': project.name,\n 'session_average_duration': session_average_duration,\n 'session_min_duration': session_min_duration,\n 'session_max_duration': session_max_duration,\n 'session_count': session_count,\n 'file_count': files.count(),\n 'fileaction_count': fileactions.count(),\n 'event_count': events.count()\n }\n stats.append(statsdata)\n return render_to_response(\n 'stats.html',\n {\n 'stats': stats,\n 'tab': 'stats'\n },\n context_instance=RequestContext(request)\n )", "def collect():\n\n stats = {}\n for feed in Feed.objects:\n try:\n logger.info('Fetching from {0}...'.format(feed.ext_url))\n new_articles = fetch(feed)\n stats[feed.ext_url] = len(new_articles)\n\n except SAXException as e:\n if feed.errors is None:\n feed.errors = 0\n\n # Error with the feed, make a note.\n logger.info('Error fetching from {0}.'.format(feed.ext_url))\n feed.errors += 1\n feed.save()\n pretty_stats = json.dumps(stats, sort_keys=True, indent=4)\n notify('Corpora collection complete.', 'Total article count: {0}\\n\\nResults for this pass:\\n{1}'.format(len(Article.objects), pretty_stats))", "def get_public_crest_context(self):\n\n # use anonymous PyCrest as documented at http://pycrest.readthedocs.org/\n public_crest = pycrest.EVE()\n public_crest()\n\n tranquility_user_count = public_crest.userCounts.eve\n\n # fetch incursions and make them usable inside a Django template\n incursions = []\n for thing_that_looks_like_a_dict_but_isnt in public_crest.incursions().items:\n incursion = {}\n for key, value in thing_that_looks_like_a_dict_but_isnt._dict.iteritems():\n incursion[key] = value._dict if hasattr(value, '_dict') else value\n incursions.append(incursion)\n return {\n 'user_count': tranquility_user_count,\n 'incursions': incursions,\n }", "def _get_context(self):\n\n\t\taffecteds = self.folk_set.all().select_related('folk', 'mission_grid')\n\t\tgrids = {}\n\t\tfor affected in affecteds:\n\t\t\tif affected.mission_grid.slug in grids:\n\t\t\t\tgrids[affected.mission_grid.slug].append(affected.folk)\n\t\t\telse:\n\t\t\t\tgrids[affected.mission_grid.slug] = [affected.folk]\n\n\t\tcontext = {\n\t\t\t'pendingmission': self,\n\t\t\t'grids': grids,\n\t\t\t'affected': [a.folk for a in affecteds],\n\t\t\t'value': self.value,\n\t\t\t'target': self.target\n\t\t}\n\n\t\treturn context", "def resolve_paginated_community(self, info, page): # Change news to community\n page_size = 30\n # qs = News.objects.filter(reply=False)\n qs = Community.objects.filter(reply=False)\n # return paginate_data(qs, page_size, page, NewsPaginatedType)\n return paginate_data(qs, page_size, page, CommunityPaginatedType)", "def context(self) -> dict:\n\n try:\n context = cfr.train.find(self.train_number)\n\n meta_description = self.request.locale.gettext((\n 'Real-time updates for {rank}'\n '{number}, on the route \\'{route}\\'.'\n )).format(**context)\n\n context.update({\n 'meta': {\n 'page': '%s%s' % (context['rank'], context['number']),\n 'description': meta_description\n }\n })\n\n return context\n except cfr.exceptions.TrainNotFound:\n raise web.HTTPNotFound(reason='We couldn\\'t find that train')", "def cache_stats(request, template_name=\"admin/cache_stats.html\"):\n cache_stats = get_cache_stats()\n\n return render_to_response(template_name, RequestContext(request, {\n 'cache_hosts': cache_stats,\n 'cache_backend': cache.__module__,\n 'title': _(\"Server Cache\"),\n 'root_path': settings.SITE_ROOT + \"admin/db/\"\n }))", "def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n\n language_code = get_language()\n # Changed since T278337: add filter to queryset before we build the partners\n # dictionary\n partner_filtered_list = MainPartnerFilter(\n self.request.GET, queryset=self.get_queryset(), language_code=language_code\n )\n context[\"filter\"] = partner_filtered_list\n\n user = self.request.user\n if user.is_authenticated:\n user = User.objects.select_related(\"editor\").get(pk=self.request.user.pk)\n context[\"user\"] = user\n context[\"editor\"] = user.editor\n partners_list = []\n partner_search_list = []\n for partner in partner_filtered_list.qs:\n partner_dict = {}\n partner_dict[\"pk\"] = partner.pk\n partner_dict[\"company_name\"] = partner.company_name\n try:\n partner_dict[\"partner_logo\"] = partner.logos.logo.url\n except ObjectDoesNotExist:\n partner_dict[\"partner_logo\"] = None\n partner_dict[\"is_not_available\"] = partner.is_not_available\n partner_dict[\"is_waitlisted\"] = partner.is_waitlisted\n new_tags = partner.new_tags\n # Getting tags from locale files\n translated_tags = get_tag_names(language_code, new_tags)\n partner_dict[\"tags\"] = translated_tags\n partner_dict[\"languages\"] = partner.get_languages\n # Obtaining translated partner description\n partner_short_description_key = \"{pk}_short_description\".format(\n pk=partner.pk\n )\n partner_description_key = \"{pk}_description\".format(pk=partner.pk)\n partner_descriptions = get_partner_description(\n language_code, partner_short_description_key, partner_description_key\n )\n\n partner_dict[\"short_description\"] = partner_descriptions[\n \"short_description\"\n ]\n partner_dict[\"description\"] = partner_descriptions[\"description\"]\n partners_list.append(partner_dict)\n if partner_descriptions[\"description\"]:\n partner_desc = bleach.clean(\n partner_descriptions[\"description\"],\n tags=[],\n strip=True,\n )\n else:\n partner_desc = \"\"\n\n if partner_descriptions[\"short_description\"]:\n partner_short_desc = bleach.clean(\n partner_descriptions[\"short_description\"],\n tags=[],\n strip=True,\n )\n else:\n partner_short_desc = \"\"\n\n partner_search_list.append(\n {\n \"partner_pk\": partner.pk,\n \"partner_name\": partner.company_name,\n \"partner_short_description\": partner_short_desc,\n \"partner_description\": partner_desc,\n }\n )\n context[\"partners_list\"] = partners_list\n context[\"partner_search_list\"] = partner_search_list\n\n return context", "def get_context_data(self) -> dict:\n return {\n \"site\": getattr(self, \"site\")\n }", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n try:\n cluster_pk = int(kwargs['cluster_pk'][:6]) # afkappen voor de veiligheid\n cluster = (Cluster\n .objects\n .select_related('regio', 'regio__rayon')\n .get(pk=cluster_pk))\n except (ValueError, Cluster.DoesNotExist):\n raise Http404('Cluster niet gevonden')\n\n context['cluster'] = cluster\n context['regio'] = cluster.regio\n\n try:\n deelcomp_pk = int(kwargs['deelcomp_pk'][:6]) # afkappen voor de veiligheid\n deelcomp = (Regiocompetitie\n .objects\n .select_related('competitie')\n .get(pk=deelcomp_pk))\n except (ValueError, Regiocompetitie.DoesNotExist):\n raise Http404('Competitie niet gevonden')\n\n context['deelcomp'] = deelcomp\n\n context['rondes'] = planning_sorteer_weeknummers(\n RegiocompetitieRonde\n .objects\n .filter(regiocompetitie=deelcomp,\n cluster=cluster))\n\n for ronde in context['rondes']:\n ronde.wedstrijd_count = ronde.matches.count()\n # for\n\n # alleen de RCL mag de planning 
uitbreiden\n if self.rol_nu == Rollen.ROL_RCL and len(context['rondes']) < 16:\n context['url_nieuwe_week'] = reverse('CompLaagRegio:regio-cluster-planning',\n kwargs={'deelcomp_pk': deelcomp.pk,\n 'cluster_pk': cluster.pk})\n\n comp = deelcomp.competitie\n\n if self.rol_nu == Rollen.ROL_HWL:\n # TODO: deze terug verwijzing klopt niet helemaal meer. Zou Beheer Vereniging kunnen zijn als we een nieuw kaartje maken om de planning in te zien\n comp_url = reverse('Competitie:overzicht', kwargs={'comp_pk': comp.pk})\n else:\n comp_url = reverse('CompBeheer:overzicht', kwargs={'comp_pk': comp.pk})\n\n context['kruimels'] = (\n (reverse('Competitie:kies'), 'Bondscompetities'),\n (comp_url, comp.beschrijving.replace(' competitie', '')),\n (reverse('CompLaagRegio:regio-planning', kwargs={'deelcomp_pk': deelcomp.pk}), 'Planning'),\n (None, 'Cluster')\n )\n\n menu_dynamics(self.request, context)\n return context", "def __context(self):\r\n # TODO: do we need to include anything else?\r\n # TODO: do this with the django-settings-context-processor\r\n return {\r\n \"FEATURES\": settings.FEATURES,\r\n \"THEME_NAME\" : getattr(settings, \"THEME_NAME\", None),\r\n }", "def get_template_render_context(self):\n return {\n \"distribution\": self,\n \"distribution_numbers\": self.numbers,\n \"distribution_times\": self.times.all(),\n }", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def global_stats(request):\n\n if request.user.is_authenticated:\n profile: IntervalsProfile = request.user.profile\n\n global_correct = 0\n global_answered = 0\n\n for r in profile.all_recent_results().values():\n global_correct += r.total_correct\n global_answered += r.total_completed\n\n print(global_correct, global_answered)\n\n return Response({\"global_correct\": global_correct,\n \"global_answered\": global_answered},\n status=status.HTTP_201_CREATED)\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def search_user_communities(self):\n hits = self.service.search_user_communities(\n identity=g.identity,\n params=resource_requestctx.args,\n es_preference=es_preference()\n )\n return hits.to_dict(), 200", "def get_results_from_aggregation_sources(self, context):", "def _extra_context(self):\r\n return {}", "def _initialize_context_caches(self):\n # Default is using global context cache\n self.energy_context_cache = cache.global_context_cache\n self.sampler_context_cache = cache.global_context_cache", "def mywo(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n events = Event.objects.filter(org__in=orgs)\n l = {}\n for org in orgs:\n l[org.name] = Event.objects.filter(org=org)\n \n #context['events'] = events\n context['events'] = l\n return render_to_response('mywo.html', context)", "def get_context(self) -> dict:\n # Add default context\n current_site = Site.objects.get_current()\n self.default_context.update({\"site\": current_site})\n return self.default_context", "def _get_statistics(self, cr, uid, ids, name, arg, context=None):\n Statistics = self.pool['mail.mail.statistics']\n results = dict.fromkeys(ids, False)\n for cid in ids:\n stat_ids = Statistics.search(cr, uid, [('mass_mailing_campaign_id', '=', cid)], context=context)\n stats = Statistics.browse(cr, uid, stat_ids, context=context)\n results[cid] = {\n 'total': len(stats),\n 'failed': len([s for s in stats if not s.scheduled is False and s.sent is False and not s.exception is 
False]),\n 'scheduled': len([s for s in stats if not s.scheduled is False and s.sent is False and s.exception is False]),\n 'sent': len([s for s in stats if not s.sent is False]),\n 'opened': len([s for s in stats if not s.opened is False]),\n 'replied': len([s for s in stats if not s.replied is False]),\n 'bounced': len([s for s in stats if not s.bounced is False]),\n }\n results[cid]['delivered'] = results[cid]['sent'] - results[cid]['bounced']\n results[cid]['received_ratio'] = 100.0 * results[cid]['delivered'] / (results[cid]['total'] or 1)\n results[cid]['opened_ratio'] = 100.0 * results[cid]['opened'] / (results[cid]['total'] or 1)\n results[cid]['replied_ratio'] = 100.0 * results[cid]['replied'] / (results[cid]['total'] or 1)\n return results", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def get_context_data(self, **kwargs):\n start, end = self.get_start_end_dates(self.request)\n if start or end is not None:\n orders = self.get_orders_with_range(start, end, False)\n \"\"\"\n HERE we use RAW SQL queries. 
It is ease than construct huge queryset.\n \"\"\"\n with open(os.path.join(CUR_DIR, 'sql', 'accountant_summary.sql.tpl'), 'r') as sqlfile:\n raw_sql = sqlfile.read()\n raw_sql = raw_sql.format(\n orderitem_tbl=OrderItem._meta.db_table,\n product2category_tbl=product_models.Product.categories.through._meta.db_table,\n order_tbl=Order._meta.db_table,\n open_date=pytz.utc.normalize(start).strftime('%Y-%m-%d %H:%M:%S'),\n close_date=pytz.utc.normalize(end).strftime('%Y-%m-%d %H:%M:%S'),\n )\n connection = get_default_db_connection(self.request)\n cursor = connection.cursor()\n cursor.execute(raw_sql)\n columns = [col[0] for col in cursor.description]\n category_data = {}\n total_discount = orders.aggregate(discounts=Sum('discount_total'))['discounts'] or 0\n total_quantity = 0\n total_sales = 0\n for row in cursor.fetchall():\n cdata = dict(zip(columns, row))\n category_data[cdata['category_id']] = cdata\n # total_discount += cdata['discount']\n total_quantity += cdata['amount']\n total_sales += cdata['sales']\n\n categories = dict(\n (c['id'], c)\n for c in product_models.Category.objects.all().values('id', 'name', 'parent', 'active', 'archived'))\n categories[None] = {'id': None, 'name': 'Uncategorized Items',\n 'parent': None, 'active': True, 'archived': False}\n for cid in categories:\n categories[cid].update({'sales': 0, 'amount': 0, 'percentage': 0, 'level': 0, 'child_cnt': 0})\n for cid in categories:\n if cid in category_data:\n categories[cid]['sales'] = category_data[cid]['sales']\n categories[cid]['amount'] = category_data[cid]['amount']\n if total_sales > 0:\n categories[cid]['percentage'] = 100.0 * categories[cid]['sales'] / total_sales\n parent_id = categories[cid]['parent']\n prev_parent = None\n while parent_id:\n if prev_parent is not None and prev_parent == parent_id:\n # ERROR!! 
Category has itself as parent!\n break\n prev_parent = parent_id\n categories[parent_id]['child_cnt'] += 1\n parent_id = categories[parent_id]['parent']\n categories[cid]['level'] += 1\n # sorting categories tree\n sorted_categories = []\n maxlevel = max(ctg['level'] for _, ctg in categories.items())\n for clevel in range(maxlevel + 1):\n thislevel_cats = [ctg for ctg in categories.values()\n if ctg['level'] == clevel and not\n ((not ctg['active'] or ctg['archived']) and\n ctg['child_cnt'] == 0 and ctg['amount'] == 0)]\n thislevel_cats = sorted(thislevel_cats, key=lambda x: (x['sales'], x['amount'], x['name']))\n if clevel == 0:\n sorted_categories = list(reversed(thislevel_cats))\n continue\n for subcat in thislevel_cats:\n if (not subcat['active'] or subcat['archived']) and subcat['child_cnt'] == 0 \\\n and subcat['amount'] == 0:\n # do not show last items if they are not active\n continue\n parent_pos = [pos for pos, c in enumerate(sorted_categories)\n if c['id'] == subcat['parent']] or [0]\n sorted_categories.insert(parent_pos[0] + 1, subcat)\n\n results = {\n 'discount': total_discount,\n 'categories': sorted_categories,\n 'total': total_quantity,\n 'total_sales': total_sales,\n 'start_date': start.strftime('%B %d, %Y'),\n 'end_date': end.strftime('%B %d, %Y'),\n }\n return results\n else:\n return {}", "def get_stats(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/stats\"\n\n _response = self.connector.http_call(\"get\", _url)\n\n # Update object\n self.stats = _response.json()", "def _refresh_cached_metadata(self, org=None):\r\n if not self._context:\r\n LOGGER.info(\"No valid Bitbucket fetcher context\")\r\n return {}\r\n\r\n remote_timestamp = self._get_fetcher_cache_timestamp()\r\n if not remote_timestamp or remote_timestamp == self._remote_cache_timestamp:\r\n LOGGER.info(\"Remote cache not ready (or timestamps match), skip refresh\")\r\n return\r\n self._remote_cache_timestamp = remote_timestamp\r\n\r\n url = \"{}/contexts/{}/orgs_metadata\".format(self._org_url, self._context)\r\n if org:\r\n url += \"/{}\".format(org)\r\n\r\n LOGGER.debug(\"Requesting BB fetcher bulk download\")\r\n metadata = self._request(url)\r\n if metadata:\r\n self._cached_metadata.update({org: metadata[org]\r\n for org in metadata\r\n if metadata[org]})\r\n LOGGER.debug(\"Cached %d org entries for context %s\",\r\n len(metadata), self._context)\r\n\r\n LOGGER.debug(\"Cached %d orgs for context %s\",\r\n len(self._cached_metadata), self._context)", "def view_map(request):\n\n # Retrieve the installations\n install_list = Installation.objects.filter(is_active=True)\n arr = []\n\n # For each Installation, add the affiliated Institutions\n for i in install_list:\n lists = Institution.objects.filter(host__name=i.name)\n arr.append(lists)\n\n d = dict(\n install_list = install_list,\n arr = arr,\n installation_count=install_list.count()\n )\n\n d.update(get_total_published_counts())\n\n return render(request, 'installations/map2.html', d)", "def summary(request):\n\n # Generate counts of some of the main objects\n num_courses = models.Course.objects.all().count()\n num_quizzes = models.Quiz.objects.all().count()\n num_questions = models.Question.objects.count()\n num_students = models.User.objects.count()\n num_visits = request.session.get('num_visits', 1)\n request.session['num_visits'] = num_visits + 1\n\n context = {\n 'num_courses': num_courses,\n 'num_quizzes': num_quizzes,\n 'num_questions': num_questions,\n 'num_students': num_students,\n 'num_visits': num_visits,\n }\n\n 
return Response(context)", "def contrib_viewall(request):\n is_loggedin, username = get_session_variables(request)\n contrib_list = Contribution.objects.all()\n contrib_org = {}\n if contrib_list:\n for contrib in contrib_list:\n if contrib.org_name not in contrib_org.keys():\n contrib_org[contrib.org_name] = 0\n\n for contrib in contrib_list:\n contrib_org[contrib.org_name] += 1\n \n if contrib_list:\n return render_to_response('achievement/contrib_viewall.html', \\\n {'is_loggedin':logged_in(request), \\\n 'username':username, \\\n 'contrib_list':contrib_list, 'contrib_org':contrib_org}, \\\n RequestContext(request))\n else:\n return render_to_response('achievement/noview.html', \\\n {'is_loggedin':is_loggedin, \\\n 'username':username, \\\n 'type': 'Contribution'}, \\\n RequestContext(request))", "def context():\n return dict()", "def get_context():\n context = {}\n cfg = load_service_config(\"lighttpd\")\n ip = \"127.0.0.1\"\n enable_caching = False\n try:\n mconfig = load_service_mconfig_as_json('lighttpd')\n enable_caching = mconfig.enable_caching\n except LoadConfigError:\n logging.info(\"Using default values for service 'lighttpd'\")\n\n if enable_caching:\n ip = get_ip_from_if(cfg['interface'])\n\n context['interface_ip'] = ip\n context['store_root'] = cfg['store_root']\n\n return context", "def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n project = ProjectPermissionsMixin.get_object(self)\n\n context = self._get_status(context)\n context = self._get_method(context)\n context = self._get_users(context, project)\n\n return context", "def info(self, request):\n data = serialize_procs(\n self.loaded_procs,\n use_concurrency=USE_CONCURRENCY,\n serializer_class=DjangoProcSerializer,\n )\n return JsonResponse(data=data, safe=False)", "def people_view(request):\n if request.user.is_authenticated:\n # TODO Objective 4: create a list of all users who aren't friends to the current user (and limit size)\n\n all_users = models.UserInfo.objects.all()\n myuserInfo = models.UserInfo.objects.get(user=request.user)\n myFriends = myuserInfo.friends.all()\n\n all_people = []\n for personInfo in all_users.all():\n if personInfo not in myFriends:\n all_people.append(personInfo)\n\n 
num_visits = request.session.get('num_visits', 0)\n\n listUpperBound = num_visits + 2\n peopleSize = len(all_people)\n\n new_list = []\n if listUpperBound < peopleSize - 1:\n for i in range(listUpperBound):\n new_list.append(all_people[i])\n else:\n new_list = all_people\n\n # TODO Objective 5: create a list of all friend requests to current user\n friend_list = models.FriendRequest.objects.filter(to_user=myuserInfo)\n given_list = models.FriendRequest.objects.filter(from_user=myuserInfo)\n\n sent_list=[]\n for stuff in given_list:\n sent_list.append(stuff.to_user)\n\n print(sent_list)\n\n friend_requests = []\n\n for friend in friend_list:\n friend_requests.append(friend.from_user)\n\n context = { 'user_info' : myuserInfo,\n 'all_people' : all_people,\n 'num_visits' : num_visits,\n 'new_list' : new_list,\n 'friend_requests' : friend_requests,\n 'sent_list' : sent_list}\n\n return render(request,'people.djhtml',context)\n\n request.session['failed'] = True\n return redirect('login:login_view')", "def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()", "def _get_statistics(self, cr, uid, ids, name, arg, context=None):\n Statistics = self.pool['mail.mail.statistics']\n results = dict.fromkeys(ids, False)\n for mid in ids:\n stat_ids = Statistics.search(cr, uid, [('mass_mailing_id', '=', mid)], context=context)\n stats = Statistics.browse(cr, uid, stat_ids, context=context)\n results[mid] = {\n 'total': len(stats),\n 'failed': len([s for s in stats if not s.scheduled is False and s.sent is False and not s.exception is False]),\n 'scheduled': len([s for s in stats if not s.scheduled is False and s.sent is False and s.exception is False]),\n 'sent': len([s for s in stats if not s.sent is False]),\n 'opened': len([s for s in stats if not s.opened is False]),\n 'replied': len([s for s in stats if not s.replied is False]),\n 'bounced': len([s for s in stats if not s.bounced is False]),\n }\n results[mid]['delivered'] = results[mid]['sent'] - results[mid]['bounced']\n results[mid]['received_ratio'] = 100.0 * results[mid]['delivered'] / (results[mid]['total'] or 1)\n results[mid]['opened_ratio'] = 100.0 * results[mid]['opened'] / (results[mid]['total'] or 1)\n results[mid]['replied_ratio'] = 100.0 * results[mid]['replied'] / (results[mid]['total'] or 1)\n return results", "def company_context(request):\n return {'COMPANY': settings.COMPANY}", "def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)", "def __init__(self, u):\n self.context = {\n 'u': u,\n 'cover': modify_image_url(str(u.profile.cover), 'cover'),\n 'avatar': modify_image_url(str(u.profile.avatar), 'avatar'),\n 'organizations': \"\",\n 'introduction': \"\",\n 'statement': \"\",\n 'work_history': \"\",\n 'experiences': \"\",\n 'related_link': \"\",\n 'urls': \"\",\n 'educational_bg': \"\",\n 'educations': \"\",\n 'portfolio': \"\",\n 'works': \"\",\n 'friends': [],\n 'introductions_from_frends': \"\",\n }\n if u.organization_set.exists():\n self.context['organizations'] = u.organization_set.all()\n\n if Privacy.objects.exists():\n self.context['privacy'] = Privacy.objects.all()\n\n self.context['introduction'] = u.profile.introduction\n\n self.context['statement'] = u.profile.statement\n\n self.context['work_history'] = u.profile.workhistory\n if self.context['work_history'].experience_set.exists():\n self.context['experiences'] = self.context['work_history'].experience_set.all().order_by('-from_date')\n\n self.context['portfolio'] = u.profile.portfolio\n if 
self.context['portfolio'].work_set.exists():\n self.context['works'] = self.context['portfolio'].work_set.all().order_by('-made_at')\n\n self.context['related_link'] = u.profile.relatedlink\n if self.context['related_link'].url_set.exists():\n self.context['urls'] = self.context['related_link'].url_set.all()\n\n self.context['educational_bg'] = u.profile.educationalbackground\n if self.context['educational_bg'].education_set.exists():\n self.context['educations'] = self.context['educational_bg'].education_set.all().order_by('-graduated_at')\n\n if u.friendrelationship_follower.exists():\n friend_relationships = u.friendrelationship_follower.all()\n for fr in friend_relationships:\n f = User.objects.get(pk=fr.followed_user_id)\n self.context['friends'].append(f)\n\n if u.introductionfromfriend_introduced_user.exists():\n self.context['introductions_from_frends'] = u.introductionfromfriend_introduced_user.all()", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n try:\n match_pk = int(kwargs['match_pk'][:6]) # afkappen voor de veiligheid\n match = (CompetitieMatch\n .objects\n .select_related('uitslag')\n .prefetch_related('uitslag__scores')\n .get(pk=match_pk))\n except (ValueError, CompetitieMatch.DoesNotExist):\n raise Http404('Wedstrijd niet gevonden')\n\n rondes = match.regiocompetitieronde_set.all()\n if len(rondes) == 0:\n raise Http404('Geen regio wedstrijd')\n ronde = rondes[0]\n\n rol_nu, functie_nu = rol_get_huidige_functie(self.request)\n if ronde.regiocompetitie.functie != functie_nu:\n # mag niet wijzigen\n raise PermissionDenied('Niet de beheerder')\n\n context['competitie'] = comp = ronde.regiocompetitie.competitie\n is_25m = (comp.afstand == '25')\n\n context['regio'] = ronde.regiocompetitie.regio\n context['ronde'] = ronde\n context['wedstrijd'] = match\n\n if ronde.regiocompetitie.inschrijf_methode == INSCHRIJF_METHODE_1:\n jaar = ronde.regiocompetitie.competitie.begin_jaar\n week = settings.COMPETITIES_START_WEEK\n context['datum_eerste'] = competitie_week_nr_to_date(jaar, week)\n\n if ronde.regiocompetitie.competitie.afstand == '18':\n week = settings.COMPETITIE_18M_LAATSTE_WEEK + 1\n else:\n week = settings.COMPETITIE_25M_LAATSTE_WEEK + 1\n week += 1\n if week < settings.COMPETITIES_START_WEEK:\n jaar += 1\n context['datum_laatste'] = competitie_week_nr_to_date(jaar, week) # TODO: moet 1 dag eerder?\n else:\n # laat een dag van de week kiezen\n\n # zoek het weeknummer waarin deze wedstrijd gehouden moet worden\n context['opt_weekdagen'] = opt_weekdagen = list()\n\n # bepaal de weekdag uit de huidige wedstrijd datum\n jaar = ronde.regiocompetitie.competitie.begin_jaar\n when = competitie_week_nr_to_date(jaar, ronde.week_nr)\n ronde.maandag = when\n\n verschil = match.datum_wanneer - when\n dag_nr = verschil.days\n\n for weekdag_nr, weekdag_naam in WEEK_DAGEN:\n obj = SimpleNamespace()\n obj.weekdag_nr = weekdag_nr\n obj.weekdag_naam = weekdag_naam\n obj.datum = when\n obj.actief = (dag_nr == weekdag_nr)\n opt_weekdagen.append(obj)\n\n when += datetime.timedelta(days=1)\n # for\n\n match.tijd_begin_wedstrijd_str = match.tijd_begin_wedstrijd.strftime(\"%H:%M\")\n\n if ronde.cluster:\n verenigingen = ronde.cluster.vereniging_set.order_by('ver_nr')\n else:\n verenigingen = ronde.regiocompetitie.regio.vereniging_set.order_by('ver_nr')\n context['verenigingen'] = verenigingen\n\n if not match.vereniging and verenigingen.count() > 0:\n match.vereniging = verenigingen[0]\n match.save()\n\n if not match.locatie and match.vereniging:\n # alle 
binnen accommodaties hebben discipline_indoor=True\n # externe locaties met dezelfde discipline komen ook mee\n locaties = match.vereniging.locatie_set.exclude(zichtbaar=False).filter(discipline_indoor=True)\n\n if is_25m:\n # neem ook externe locaties mee met discipline=25m1pijl\n locaties_25m1p = match.vereniging.locatie_set.exclude(zichtbaar=False).filter(discipline_25m1pijl=True)\n locaties = locaties.union(locaties_25m1p)\n\n if locaties.count() > 0:\n match.locatie = locaties[0] # pak een default\n # maak een slimmere keuze\n for locatie in locaties:\n if is_25m:\n if locatie.banen_25m > 0:\n match.locatie = locatie\n else:\n if locatie.banen_18m > 0:\n match.locatie = locatie\n # for\n match.save()\n\n context['all_locaties'] = all_locs = list()\n pks = [ver.pk for ver in verenigingen]\n for ver in (Vereniging\n .objects\n .prefetch_related('locatie_set')\n .filter(pk__in=pks)):\n for loc in ver.locatie_set.exclude(zichtbaar=False):\n keep = False\n if is_25m:\n if loc.banen_25m > 0 and (loc.discipline_indoor or loc.discipline_25m1pijl):\n keep = True\n else:\n if loc.discipline_indoor and loc.banen_18m > 0:\n keep = True\n\n if keep:\n all_locs.append(loc)\n loc.ver_pk = ver.pk\n keuze = loc.adres.replace('\\n', ', ')\n if loc.notities:\n keuze += ' (%s)' % loc.notities\n if not keuze:\n keuze = loc.plaats\n if not keuze:\n keuze = 'Locatie zonder naam (%s)' % loc.pk\n loc.keuze_str = keuze\n if match.locatie == loc:\n loc.selected = True\n # for\n # for\n\n context['heeft_wkl'] = heeft_wkl = (ronde.regiocompetitie.inschrijf_methode == INSCHRIJF_METHODE_2)\n if heeft_wkl:\n context['wkl_indiv'], context['wkl_team'] = self._get_wedstrijdklassen(ronde.regiocompetitie, match)\n\n context['url_opslaan'] = reverse('CompLaagRegio:regio-wijzig-wedstrijd',\n kwargs={'match_pk': match.pk})\n\n if ronde.regiocompetitie.inschrijf_methode == INSCHRIJF_METHODE_1:\n url_planning_week = reverse('CompLaagRegio:regio-methode1-planning',\n kwargs={'ronde_pk': ronde.pk})\n else:\n url_planning_week = reverse('CompLaagRegio:regio-ronde-planning',\n kwargs={'ronde_pk': ronde.pk})\n\n uitslag = match.uitslag\n if uitslag and (uitslag.is_bevroren or uitslag.scores.count()):\n context['kan_niet_verwijderen'] = True\n else:\n context['url_verwijderen'] = reverse('CompLaagRegio:regio-verwijder-wedstrijd',\n kwargs={'match_pk': match.pk})\n\n context['kruimels'] = [\n (reverse('Competitie:kies'), 'Bondscompetities'),\n (reverse('CompBeheer:overzicht', kwargs={'comp_pk': comp.pk}), comp.beschrijving.replace(' competitie', '')),\n (reverse('CompLaagRegio:regio-planning', kwargs={'deelcomp_pk': ronde.regiocompetitie.pk}), 'Planning'),\n (url_planning_week, 'Week'),\n (None, 'Wijzig wedstrijd')\n ]\n\n if ronde.cluster:\n tup = (reverse('CompLaagRegio:regio-cluster-planning', kwargs={'cluster_pk': ronde.cluster.pk,\n 'deelcomp_pk': ronde.regiocompetitie.pk}),\n 'Cluster')\n context['kruimels'].insert(-2, tup)\n\n menu_dynamics(self.request, context)\n return context", "def process(self):\n if self.user is not None:\n async_queries = []\n \n # DEBUGGING\n # prefs = []\n \n # Load in preferences\n for k, v in self.user.preferences.iteritems():\n for l in self.user.preferences[k]:\n gmap_search = GoogleMapSearch()\n gmap_search.parameters['location'] = '40,-88'\n gmap_search.parameters['radius'] = '10000'\n gmap_search.parameters['keyword'] = l\n gmap_search.parameters['language'] = 'en'\n # gmap_search.parameters['minprice'] = None\n # gmap_search.parameters['maxprice'] = None\n # gmap_search.parameters['name'] = 
None\n # gmap_search.parameters['opennow'] = None\n # gmap_search.parameters['rankby'] = None\n if k != 'keywords': gmap_search.parameters['types'] = k\n # gmap_search.parameters['pagetoken'] = None\n \n # DEBUGGING\n # prefs.append(gmap_search.parameters)\n \n async_queries.append(gmap_search)\n \n # Send all requests\n async_pool = Pool(processes=8)\n response = async_pool.map(parallel_search, async_queries)\n filtered_responses = ['']*len(response)\n for i, r in enumerate(response):\n filtered_response = {k:v for (k, v) in r.iteritems()}\n filtered_response['category'] = {'name': None, 'type': None}\n if async_queries[i].parameters.has_key('type') and async_queries[i].parameters['types'] != '' and async_queries[i].parameters['types'] is not None: filtered_response['category']['name'] = async_queries[i].parameters['types']\n else: filtered_response['category']['name'] = 'keyword'\n filtered_response['category']['type'] = async_queries[i].parameters['keyword']\n filtered_responses[i] = filtered_response\n return json.dumps(filtered_responses)\n else: return None", "def get_context(self):\n return {}", "def base_context(request):\n if not hasattr(request, 'user'):\n return {}\n return {\n 'is_project_admin': is_project_admin(request.user)\n }", "def fetch_stats(self, keys: tuple = None):\n if not keys:\n # Fetch the stats needed for play mode\n keys = ('pvp',)\n\n # Call the superclass fetch stats method\n return super().fetch_stats(keys)", "def index(request):\n home_user = request.user.profile\n \"\"\"num_activities = Activity.objects.count()\"\"\"\n Cactivity = CompletedActivity.objects.filter(user=home_user)\n UActivity = Cactivity.values('activity_id', 'activity__name', 'activity__value', 'activity__group').annotate \\\n (count=Count('activity__name'), earned=Sum('activity__value'))\n TimesCompelted = Cactivity.annotate(count=Count('activity__name'))\n # Generate counts of some of the main objects\n\n context = {\n 'huser': home_user,\n 'Lname' : home_user.user.last_name,\n 'Fname': home_user.user.first_name,\n 'num_activities': 1,\n 'activity_list' : UActivity,\n \"times_completed\" : TimesCompelted\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)", "def context_processor(request):\n if not hasattr(request, 'user'):\n raise ValueError('site module context processor requires \"django.contrib.auth.context_processors.auth\"'\n 'to be in TEMPLATE_CONTEXT_PROCESSORS in your settings file.')\n module = None\n\n if request.resolver_match:\n module = module_registry.get_module(request.resolver_match.app_name)\n if module is None:\n logger.info('You need to add SiteModuleMixin to AppConfig instance of \"{}\"'.format(request.resolver_match.app_name))\n attrs = \", \".join( \"{}={}\".format(attr, getattr( request.resolver_match, attr, \"None\")) for attr in [ 'url_name', 'app_name', 'namespace', 'route', 'viewName'])\n logger.debug(\"request.resolver_match ==> {}\".format(attrs))\n pass\n logger.debug(\"module ==> {}\".format(\"None\" if module is None else module.name))\n pass\n else:\n logger.debug(\"request.resolver_match is None\")\n pass\n\n modules = module_registry.available_modules(request.user)\n\n logger.debug(\"n-modules {}: {} in {}\".format(len(modules), module.name if module is not None else \"NONE\", \",\".join([module.name for module in modules])))\n\n return {\n 'modules': modules,\n 'current_module': module,\n }", "def stats():\n return jsonify(shorten.get_stats(get_db(), 
app.config['MINI_URL_BASE']))", "def _fetchPerf(self):\n self.state = ZenProcessTask.STATE_FETCH_PERF\n\n oids = []\n for pid in self._deviceStats.pids:\n if not AS400PLUG in self._device.zCollectorPlugins:\n oids.extend([CPU + str(pid), MEM + str(pid)])\n else:\n oids.extend([AS400CPU + str(pid)])\n if oids:\n singleOids = set()\n results = {}\n oidsToTest = oids\n chunkSize = self._maxOidsPerRequest\n while oidsToTest:\n for oidChunk in chunk(oidsToTest, chunkSize):\n try:\n log.debug(\"%s fetching oid(s) %s\" % (self._devId, oidChunk))\n result = yield self._get(oidChunk)\n results.update(result)\n except (error.TimeoutError, Snmpv3Error) as e:\n log.debug(\"error reading oid(s) %s - %s\", oidChunk, e)\n singleOids.update(oidChunk)\n oidsToTest = []\n if singleOids and chunkSize > 1:\n chunkSize = 1\n log.debug(\"running oids for %s in single mode %s\" % (self._devId, singleOids))\n oidsToTest = list(singleOids)\n self._storePerfStats(results)", "def personal_group_metrics(request):\n\ttotal_pms, median_pm_idx, median_pm_tuple, aggregate_pm_chats, avg_chat_per_pm, total_pgs, median_pg_idx, median_pg_tuple, aggregate_pg_chats, \\\n\tavg_chat_per_pg, pms_with_sws, pgs_with_sws = avg_num_of_chats_per_type()\n\t\n\ttotal_pms_sw, median_pm_sw_idx, median_pm_sw_tuple, aggregate_pm_sws, avg_sw_per_pm, total_pgs_sw, median_pg_sw_idx, median_pg_sw_tuple, \\\n\taggregate_pg_sws, avg_sw_per_pg = avg_num_of_switchovers_per_type()\n\n\ttotal_pgs_sess, total_pms_sess, med_sess_per_user_per_pg, med_sess_per_user_per_pm, avg_sess_per_user_per_pg, avg_sess_per_user_per_pm, \\\n\tavg_users_per_pm, med_users_per_pm, avg_users_per_pg, med_users_per_pg, avg_sess_per_user_per_two_user_pm, med_sess_per_user_per_two_user_pm,\\\n\ttotal_two_user_pms, avg_users_per_two_user_pm, med_users_per_two_user_pm = avg_sessions_per_type()\n\n\treturn render(request,\"personal_group/metrics/personal_group_metrics.html\",{'total_pms':total_pms,'agg_pm_chats':aggregate_pm_chats,\\\n\t\t'avg_pm_chats':avg_chat_per_pm,'total_pgs':total_pgs,'agg_pg_chats':aggregate_pg_chats,'avg_pg_chats':avg_chat_per_pg,\\\n\t\t'med_pm_idx':median_pm_idx,'med_pg_idx':median_pg_idx,'med_pm_tup':median_pm_tuple,'med_pg_tup':median_pg_tuple,\\\n\t\t'total_pms_sw':total_pms_sw,'agg_pm_sws':aggregate_pm_sws,'avg_pm_sws':avg_sw_per_pm,'total_pgs_sw':total_pgs_sw,\\\n\t\t'agg_pg_sws':aggregate_pg_sws,'avg_pg_sws':avg_sw_per_pg,'med_pm_idx_sw':median_pm_sw_idx,'med_pg_idx_sw':median_pg_sw_idx,\\\n\t\t'med_pm_tup_sw':median_pm_sw_tuple,'med_pg_tup_sw':median_pg_sw_tuple,'avg_sess_per_user_per_pg':avg_sess_per_user_per_pg,\\\n\t\t'avg_sess_per_user_per_pm':avg_sess_per_user_per_pm,'med_sess_per_user_per_pg':med_sess_per_user_per_pg,\\\n\t\t'med_sess_per_user_per_pm':med_sess_per_user_per_pm,'pgs_sampled_sess':total_pgs_sess,'pms_sampled_sess':total_pms_sess,\\\n\t\t'avg_users_per_pm':avg_users_per_pm, 'med_users_per_pm':med_users_per_pm,'avg_users_per_pg':avg_users_per_pg,\\\n\t\t'med_users_per_pg':med_users_per_pg,'pms_with_sws':pms_with_sws,'pgs_with_sws':pgs_with_sws,'total_two_user_pms':total_two_user_pms,\\\n\t\t'avg_sess_per_user_per_two_user_pm':avg_sess_per_user_per_two_user_pm,'med_sess_per_user_per_two_user_pm':med_sess_per_user_per_two_user_pm,\\\n\t\t'avg_users_per_two_user_pm':avg_users_per_two_user_pm, 'med_users_per_two_user_pm':med_users_per_two_user_pm})", "def construct_global_ctx(self):\n super().construct_global_ctx()\n gtx = self.gtx\n rc = self.rc\n if \"groups\" in self.needed_dbs:\n rc.pi_id = get_pi_id(rc)\n rc.coll = 
f\"{TARGET_COLL}\"\n try:\n if not rc.database:\n rc.database = rc.databases[0][\"name\"]\n except:\n pass\n colls = [\n sorted(\n all_docs_from_collection(rc.client, collname), key=_id_key\n )\n for collname in self.needed_dbs\n ]\n for db, coll in zip(self.needed_dbs, colls):\n gtx[db] = coll\n gtx[\"all_docs_from_collection\"] = all_docs_from_collection\n gtx[\"float\"] = float\n gtx[\"str\"] = str\n gtx[\"zip\"] = zip", "def friend_stats(request):\n \n r = {}\n fb_ids = FacebookProfile.objects.all().values(\"facebook_id\") \n for u in FacebookProfile.objects.all():\n friends = Friends.objects.filter(facebook_id__in=fb_ids)\n num_friends = Friends.objects.filter(facebook_id=u.facebook_id, friends__in=friends).count()\n participant = OTNUser.objects.get(facebook_profile__facebook_id=u.facebook_id)\n r[u.facebook_id]=\"%s (%d): %d\"%(participant.name, participant.id, num_friends)\n\n return JSONHttpResponse(r)", "def get_context_data(self, **kwargs):\n user = ImagerProfile.objects.get(user__username=self.request.user.username)\n # import pdb;\n context = super(ProfileView, self).get_context_data(**kwargs)\n photos = self.request.user.photos.all()\n ph_public = len(photos.filter(published=\"Public\"))\n ph_private = len(photos.filter(published=\"Private\"))\n albums = self.request.user.albums.all()\n al_public = len(albums.filter(published=\"Public\"))\n al_private = len(albums.filter(published=\"Private\"))\n context = {'user': user, 'ph_public': ph_public, 'ph_private': ph_private,\n 'al_public': al_public, 'al_private': al_private}\n return context", "def cache_component_stats(self, k):\n return (\n self.m_N_numerators[k].copy(),\n self.S_N_partials[k].copy(),\n self.log_prod_vars[k],\n self.inv_vars[k].copy(),\n self.counts[k]\n )", "def get_context(self):\n\n return {\n \"@context\": {\n \"first_name\": \"http://schema.org/first_name\",\n \"last_name\": \"http://schema.org/last_name\",\n \"email\": \"http://schema.org/email\",\n \"birthday\": \"http://schema.org/birthDate\",\n \"phone\": \"http://schema.org/telephone\",\n \"languages\": \"http://schema.org/languages\",\n \"skills\": \"http://schema.org/skills\",\n \"number_of_reviews\": \"http://schema.org/number_of_reviews\",\n \"number_of_interviews\": \"http://schema.org/number_of_interviews\",\n \"id\": \"http://schema.org/id\"\n }\n }", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n try:\n deelcomp_pk = int(kwargs['deelcomp_pk'][:6]) # afkappen voor de veiligheid\n deelcomp = (Regiocompetitie\n .objects\n .select_related('competitie',\n 'regio',\n 'regio__rayon')\n .get(pk=deelcomp_pk))\n except (ValueError, Regiocompetitie.DoesNotExist):\n raise Http404('Competitie niet gevonden')\n\n context['deelcomp'] = deelcomp\n context['regio'] = deelcomp.regio\n\n mag_wijzigen = (self.rol_nu == Rollen.ROL_RCL and self.functie_nu.regio == deelcomp.regio)\n\n if deelcomp.inschrijf_methode == INSCHRIJF_METHODE_1:\n self._get_methode_1(context, deelcomp)\n context['inschrijfmethode'] = '1 (keuze sporter)'\n else:\n self._get_methode_2_3(context, deelcomp, mag_wijzigen)\n\n if deelcomp.inschrijf_methode == INSCHRIJF_METHODE_2:\n context['inschrijfmethode'] = '2 (wedstrijdklasse naar locatie)'\n else:\n context['inschrijfmethode'] = '3 (sporter voorkeur dagdeel)'\n\n if self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO):\n rayon = Kampioenschap.objects.get(competitie=deelcomp.competitie,\n deel=DEEL_RK,\n rayon=deelcomp.regio.rayon)\n context['url_rayon'] = 
reverse('CompLaagRayon:planning',\n kwargs={'deelkamp_pk': rayon.pk})\n\n comp = deelcomp.competitie\n\n if self.rol_nu == Rollen.ROL_HWL:\n # TODO: deze terug verwijzing klopt niet helemaal meer. Zou Beheer Vereniging kunnen zijn als we een nieuw kaartje maken om de planning in te zien\n comp_url = reverse('Competitie:overzicht', kwargs={'comp_pk': comp.pk})\n else:\n comp_url = reverse('CompBeheer:overzicht', kwargs={'comp_pk': comp.pk})\n\n context['kruimels'] = (\n (reverse('Competitie:kies'), 'Bondscompetities'),\n (comp_url, comp.beschrijving.replace(' competitie', '')),\n (None, 'Planning'),\n )\n\n menu_dynamics(self.request, context)\n return context", "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Home Page',\n 'proposals_todo': Proposals.objects.filter(proposer=request.user, last_status='P').count(),\n 'proposals_accepted': Proposals.objects.filter(Q(last_status='A') & ( \n Q(proposer=request.user) | \n Q(coproposers__uid__exact=request.user) | \n Q(local_contacts__uid__exact=request.user))).distinct().count(),\n 'proposals_director': Proposals.objects.filter(last_status='D').count(),\n 'proposals_userofficeS': Proposals.objects.filter(last_status__in='S').count(),\n 'proposals_userofficeU': Proposals.objects.filter(last_status__in='U').count(),\n 'proposals_localcontact': Proposals.objects.filter(local_contacts__uid__exact=request.user, last_status='T').count(),\n 'proposals_panel': Proposals.objects.filter(last_status='W').count(),\n 'proposals_my_panel': Proposals.objects.filter(reporter__uid=request.user, last_status='R').count(),\n 'proposals_my_board': Proposals.objects.filter(reporter__uid=request.user, last_status='B').count(),\n 'proposals_board': Proposals.objects.filter(last_status='B').count(),\n 'report_missing': Report.objects.filter((Q(pdf__isnull=True) | Q(pdf__exact='')) & Q(proposal__proposer=request.user)).distinct().count(),\n }\n )", "def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]:\n try:\n importlib.import_module(name=\"scrapli_community\")\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n \"Scrapli Community package is not installed!\\n\"\n \"To resolve this issue, install the transport plugin. You can do this in one of \"\n \"the following ways:\\n\"\n \"1: 'pip install -r requirements-community.txt'\\n\"\n \"2: 'pip install scrapli[community]'\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n try:\n # replace any underscores in platform name with \".\"; should support any future platforms\n # that dont have \"child\" os types -- i.e. 
just \"cisco\" instead of \"cisco_iosxe\"\n scrapli_community_platform = importlib.import_module(\n name=f\"scrapli_community.{community_platform_name.replace('_', '.')}\"\n )\n except ModuleNotFoundError as exc:\n title = \"Module not found!\"\n message = (\n f\"Scrapli Community platform '{community_platform_name}` not found!\\n\"\n \"To resolve this issue, ensure you have the correct platform name, and that a scrapli \"\n \" community platform of that name exists!\"\n )\n warning = format_user_warning(title=title, message=message)\n raise ScrapliModuleNotFound(warning) from exc\n\n platform_details_original = getattr(scrapli_community_platform, \"SCRAPLI_PLATFORM\", {})\n if not platform_details_original:\n msg = \"Community platform missing required attribute `SCRAPLI_PLATFORM`\"\n raise ScrapliException(msg)\n platform_details: Dict[str, Any] = deepcopy(platform_details_original)\n return platform_details", "def get_stats() -> dict:\n\n url = f\"{CONFIG.POSTGREST}/app_about_stats\"\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n except (requests.ConnectionError, requests.exceptions.HTTPError) as e:\n APP.logger.error(f'API request for db stats returned: {e}')\n else:\n results = json.loads(response.text)\n # APP.logger.debug(results)\n return results", "def find_and_print_network_communities(G, code_dict=None):\n\n comm_dict = partition(G)\n\n comm_members = {}\n for comm in set(comm_dict.values()):\n countries = [node for node in comm_dict if comm_dict[node] == comm]\n if code_dict is not None:\n countries = [code_dict[code] for code in countries]\n\n comm_members[comm] = countries\n\n return comm_members, get_modularity(G, comm_dict)", "def stats(self):\r\n return {}", "def package_context(subscriptions):\n data = {'subscriptions' : subscriptions}\n data['subscription_count'] = len(subscriptions)\n return data", "def home(request):\n\n result = {}\n \n result['surveys'] = []\n result['review_requests'] = []\n\n u = request.user\n\n # get surveys\n surveys = Survey.objects.all()\n for s in surveys:\n status, created = eval(\"%s.objects.get_or_create(survey=s, user=u)\"%s.model_name)\n if created:\n status.uuid_token = uuid.uuid4()\n status.save()\n if not status.completed:\n result['surveys'].append(s.summary())\n \n my_products = TransactionLineItem.objects.filter(transaction__party=u).values('product')\n # Find review requests related to product I have purchased, that I haven't reviewed \n reqs = ReviewRequest.objects.exclude(requester=u).filter(product__in=my_products).exclude(replies__reviewer=u)\n \n for r in reqs:\n result['review_requests'].append(r.get_json(me=u))\n\n # TODO: Group purchase requests\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u)\n result['feeds'] = [ f.get_json(me=u) for f in feeds ]\n\n return JSONHttpResponse(result)", "def gather_cache(self):\n cache = {\"grains\": {}, \"pillar\": {}}\n if self.grains or self.pillar:\n if self.opts.get(\"minion_data_cache\"):\n minions = self.cache.list(\"minions\")\n if not minions:\n return cache\n for minion in minions:\n total = self.cache.fetch(\"minions/{}\".format(minion), \"data\")\n\n if \"pillar\" in total:\n if self.pillar_keys:\n for key in self.pillar_keys:\n if key in total[\"pillar\"]:\n cache[\"pillar\"][minion][key] = total[\"pillar\"][key]\n else:\n cache[\"pillar\"][minion] = total[\"pillar\"]\n else:\n cache[\"pillar\"][minion] = {}\n\n if \"grains\" in total:\n if self.grain_keys:\n for key in self.grain_keys:\n if 
key in total[\"grains\"]:\n cache[\"grains\"][minion][key] = total[\"grains\"][key]\n else:\n cache[\"grains\"][minion] = total[\"grains\"]\n else:\n cache[\"grains\"][minion] = {}\n return cache", "def list_merge_requests(request):\n if request.user.profile.superuser == False:\n raise Http404\n \n lMergeRequests = PersonMergeRequest.objects.filter()\n for mergeRequest in lMergeRequests:\n mergeRequest.from_adjuducations_count = ContestAdjudicator.objects.filter(person=mergeRequest.source_person).count()\n mergeRequest.to_adjuducations_count = ContestAdjudicator.objects.filter(person=mergeRequest.destination_person).count()\n \n mergeRequest.from_compositions_count = TestPiece.objects.filter(composer=mergeRequest.source_person).count()\n mergeRequest.to_compositions_count = TestPiece.objects.filter(composer=mergeRequest.destination_person).count()\n \n mergeRequest.from_arranger_count = TestPiece.objects.filter(arranger=mergeRequest.source_person).count()\n mergeRequest.to_arranger_count = TestPiece.objects.filter(arranger=mergeRequest.destination_person).count()\n \n return render_auth(request, 'move/list_person_merge_requests.html', {'MergeRequests' : lMergeRequests})", "def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None):\n obj = self.pool['mail.mail.statistics']\n res = {}\n for mailing in self.browse(cr, uid, ids, context=context):\n res[mailing.id] = {}\n date = mailing.sent_date if mailing.sent_date else mailing.create_date\n date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)\n date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1)\n date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)\n date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)\n domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)]\n res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context))\n domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)]\n res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context))\n return res", "def _render_merge_proposals(self) -> defaultdict:\n logging.debug(f\"Fetching merge proposals for {self.user.display_name}\")\n proposals = defaultdict(list)\n for status in [\n \"Work in progress\",\n \"Needs review\",\n \"Approved\",\n \"Rejected\",\n \"Merged\",\n \"Code failed to merge\",\n \"Queued\",\n \"Superseded\",\n ]:\n all_mps = self.user.getMergeProposals(status=status)\n for mp in all_mps:\n if not in_window(self.window, mp.date_created):\n break\n project = mp.web_link.split(\"/\")[4]\n proposals[project].append(mp.web_link)\n return proposals", "def get_online_count():\n return dict(online_user=get_online_users())", "def consolidate_other(self):\n record = self.db[self.args['cstats_table']].find_one({'type': 'client'})\n if not record:\n self.log.critical('Could not get the \"client\" key in the \"cstats_table\"')\n return\n for k in record.keys():\n if k in ['_id', 'type', 'stats']:\n continue\n self.other[k] = record[k]\n self.stats.update(record.get('stats', {}))", "def utility_processor():\n def modethumb(name):\n \"\"\"Return a URL for an image related to the match mode.\"\"\"\n name = name.lower()\n if os.path.isfile(os.path.join(basedir,\n 
'app', 'static', 'img', 'modethumbs', name + '.png')):\n return flask.url_for('static', filename='img/modethumbs/' + name + '.png')\n else:\n return flask.url_for('static', filename='img/modethumbs/othermode.png')\n\n def antag_objs(matchid, antagkey):\n \"\"\"Retrieve the objectives for an antag from a given match.\"\"\"\n return db.session.query(Match).get(matchid).antagobjs.filter(AntagObjective.mindkey == antagkey)\n\n # def add_months(sourcedate, months):\n # \"\"\"Add months to original date. Returns a datetime.\"\"\"\n # month = sourcedate.month - 1 + months\n # year = int(sourcedate.year + month / 12)\n # month = month % 12 + 1\n # day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n # return datetime.date(year, month, day)\n\n def population_timeline_chart_data(matchid):\n \"\"\"Get some population data for Chart.JS in JSON format.\"\"\"\n ps = Match.query.get(matchid).populationstats.all()\n labels = []\n popcounts = []\n lowestPop = 100\n\n for snapshot in ps:\n labels.append(snapshot.time.strftime('%H:%M'))\n popcounts.append(snapshot.popcount)\n if snapshot.popcount is None or snapshot.popcount < lowestPop:\n lowestPop = snapshot.popcount\n\n return json.dumps(labels), json.dumps(popcounts), lowestPop\n\n return dict(add_months=add_months, antag_objs=antag_objs, modethumb=modethumb,\n population_timeline_chart_data=population_timeline_chart_data)", "def getContext(self):\n context = {}\n result = {}\n service = backendservices()\n # Grab references to all the user's StochKitJobs in the system\n all_stochkit_jobs = db.GqlQuery(\"SELECT * FROM StochKitJobWrapper WHERE user_id = :1\", self.user.email_address)\n if all_stochkit_jobs == None:\n context['no_jobs'] = 'There are no jobs in the system.'\n else:\n # We want to display the name of the job and the status of the Job.\n all_jobs = []\n status = {}\n \n for job in all_stochkit_jobs.run():\n \n # Get the job id\n stochkit_job = job.stochkit_job\n \n # Query the backend for the status of the job, but only if the current status is not Finished\n if not stochkit_job.status == \"Finished\":\n try:\n if stochkit_job.resource == 'Local':\n # First, check if the job is still running\n res = service.checkTaskStatusLocal([stochkit_job.pid])\n if res[stochkit_job.pid]:\n stochkit_job.status = \"Running\"\n else:\n # Check if the signature file is present, that will always be the case for a sucessful job.\n # for ssa and tau leaping, this is means.txt\n # for ode, this is output.txt\n\n if stochkit_job.exec_type == 'stochastic':\n file_to_check = stochkit_job.output_location+\"/result/stats/means.txt\"\n else:\n file_to_check = stochkit_job.output_location+\"/result/output.txt\"\n \n if os.path.exists(file_to_check):\n stochkit_job.status = \"Finished\"\n else:\n stochkit_job.status = \"Failed\"\n \n elif stochkit_job.resource == 'Cloud':\n # Retrive credentials from the datastore\n if not self.user_data.valid_credentials:\n return {'status':False,'msg':'Could not retrieve the status of job '+stochkit_job.name +'. 
Invalid credentials.'}\n credentials = self.user_data.getCredentials()\n\n # Check the status on the remote end\n taskparams = {'AWS_ACCESS_KEY_ID':credentials['EC2_ACCESS_KEY'],'AWS_SECRET_ACCESS_KEY':credentials['EC2_SECRET_KEY'],'taskids':[stochkit_job.pid]}\n task_status = service.describeTask(taskparams)\n job_status = task_status[stochkit_job.pid]\n # It frequently happens that describeTasks return None before the job is finsihed.\n if job_status == None:\n stochkit_job.status = \"Unknown\"\n else:\n\n if job_status['status'] == 'finished':\n # Update the stochkit job \n stochkit_job.status = 'Finished'\n stochkit_job.output_url = job_status['output']\n stochkit_job.uuid = job_status['uuid']\n \n elif job_status['status'] == 'Failed':\n stochkit_job.status == 'Failed'\n elif job_status['status'] == 'pending':\n stochkit_job.status = 'Pending'\n else:\n # The state gives more fine-grained results, like if the job is being re-run, but\n # we don't bother the users with this info, we just tell them that it is still running. \n stochkit_job.status == 'Running'\n \n except Exception,e:\n result = {'status':False,'msg':'Could not determine the status of the jobs.'+str(e)}\n \n all_jobs.append(stochkit_job)\n # Save changes to the status\n job.put()\n \n context['all_jobs']=all_jobs\n \n return dict(result,**context)", "def base_data_manager(wrapped):\n\n @check_session\n def wrapper(request, *arg, **kwargs):\n\n @cache_region.cache_on_arguments()\n def get_data_manager(collection, journal, document, range_start, range_end):\n code = document or journal or collection\n data = {}\n\n xylose_doc = request.stats.articlemeta.document(document, collection) if document else None\n\n if xylose_doc and xylose_doc.publisher_id:\n data['selected_document'] = xylose_doc\n data['selected_document_code'] = document\n journal = document[1:10]\n\n collections = request.stats.articlemeta.certified_collections()\n journals = request.stats.articlemeta.collections_journals(collection)\n selected_journal = journals.get(journal, None)\n selected_journal_code = journal if journal in journals else None\n\n today = datetime.datetime.now()\n y3 = today - datetime.timedelta(365*3)\n y2 = today - datetime.timedelta(365*2)\n y1 = today - datetime.timedelta(365*1)\n\n data.update({\n 'collections': collections,\n 'selected_code': code,\n 'selected_journal': selected_journal,\n 'selected_journal_code': selected_journal_code,\n 'selected_document_code': document or None,\n 'selected_collection': collections[collection],\n 'selected_collection_code': collection,\n 'journals': journals,\n 'range_start': range_start,\n 'range_end': range_end,\n 'today': today.isoformat()[0:10],\n 'y3': y3.isoformat()[0:10],\n 'y2': y2.isoformat()[0:10],\n 'y1': y1.isoformat()[0:10]\n })\n\n return data\n\n collection_code = request.session.get('collection', None)\n journal_code = request.session.get('journal', None)\n under_development = request.session.get('under_development', '')\n range_end = request.session.get('range_end', datetime.datetime.now().isoformat()[0:10])\n range_start = request.session.get('range_start', (datetime.datetime.now() - datetime.timedelta(365*3)).isoformat()[0:10])\n document_code = utils.REGEX_ARTICLE.match(request.session.get('document', ''))\n if document_code:\n document_code = document_code.string\n\n data = get_data_manager(collection_code, journal_code, document_code, range_start, range_end)\n data['locale'] = request.session.get('_LOCALE_', request.locale_name)\n data['under_development'] = [i for i in 
aslist(request.registry.settings.get('under_development', '')) if i != under_development]\n data['google_analytics_code'] = os.environ.get(\n 'GOOGLE_ANALYTICS_CODE',\n request.registry.settings.get('google_analytics_code', None)\n )\n data['google_analytics_sample_rate'] = os.environ.get(\n 'GOOGLE_ANALYTICS_SAMPLE_RATE',\n request.registry.settings.get('google_analytics_sample_rate', '100')\n )\n data['subject_areas'] = request.stats.publication.list_subject_areas(data['selected_code'], data['selected_collection_code'])\n data['languages'] = [(i, choices.ISO_639_1.get(i.upper(), 'undefined')) for i in request.stats.publication.list_languages(data['selected_code'], data['selected_collection_code'])]\n data['publication_years'] = request.stats.publication.list_publication_years(data['selected_code'], data['selected_collection_code'])\n if len(data['publication_years']) == 0:\n data['publication_years'] = [str(datetime.datetime.now().year)]\n py = '-'.join([data['publication_years'][0], data['publication_years'][-1]])\n data['py_range'] = request.session.get('py_range', py).split('-')\n data['sa_scope'] = request.session.get('sa_scope', data['subject_areas'])\n data['la_scope'] = request.session.get('la_scope', [k for k,v in data['languages']])\n data['content_scope'] = 'document' if data['selected_document_code'] else 'journal' if data['selected_journal_code'] else 'collection' if data['selected_collection_code'] else 'network'\n data['share_this_url'] = current_url(request.url, data)\n\n setattr(request, 'data_manager', data)\n\n return wrapped(request, *arg, **kwargs)\n\n wrapper.__doc__ = wrapped.__doc__\n\n return wrapper", "def data_for_all(request):\n data = common_data(request)\n data.update({\"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()})\n return data", "def update_statistics_counter(request):\n try:\n u = UserProfile.objects.all().count()\n s = StaticMicrotask.objects.filter(scoring_done=1).count()\n a = Task.objects.filter(published=1).count()\n \n st = StatCounter()\n st.registered_users = u\n st.translated_sentences = s\n st.published_articles = a\n st.created_on = datetime.datetime.now()\n st.save()\n \n data = {'msg':''}\n messages.success(request, \"Website statistics updated successfully.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))\n except:\n msg = traceback.format_exc()\n data = {'msg':msg}\n messages.error(request, \"Update Website statistics failed.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))", "def get_context_data(self, **kwargs):\n data = super(SemesterBasedMixin, self).get_context_data(**kwargs)\n data['sem_year'], data['sem_month'] = self.get_year_and_month()\n if self.fetch_semester:\n data['semester'] = self.get_semester()\n return data", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n miid = self.kwargs['proyecto']\n\n grupos = Group.objects.all()\n grupList = []\n for grupo in grupos:\n numero, divisor, nombre = grupo.name.partition('_')\n if (int(numero) == miid):\n grupList += [{'grupo':grupo,'nombre':nombre}]\n\n context['proyectos'] = Proyecto.objects.get(id_proyecto=miid)\n\n context['listGroup'] = grupList\n context['idProyecto'] = miid\n return context", "def get(self):\n\t\treturn {\n\t\t\t'system': self.get_system_information(),\n\t\t\t'cpu': self.get_cpu_stats(),\n\t\t\t'gpu': 
self.get_gpu_stats(),\n\t\t\t'ram': self.get_ram_stats(),\n\t\t\t'storage': self.get_storage_stats(),\n\t\t\t'battery': self.get_battery_stats(),\n\t\t\t'temps': self.get_temperatures()\n\t\t}", "def default_context(request):\n return {\n 'GOOGLE_ANALYTICS_ID': settings.GOOGLE_ANALYTICS_ID,\n 'SITE_TITLE': settings.SITE_TITLE,\n }", "def gatherActiveDataStats(self, config):\n gatherWMDataMiningStats(config.wmstats_url, config.reqmgrdb_url, \n config.wmdatamining_url, False, log = cherrypy.log)\n return", "def get_project_stats(self, pool, project):\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting project stats: '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n avail = val['project']['space_available']\n return avail", "def stats(request):\n \n return render(request, 'stats.html')" ]
[ "0.55988616", "0.5566804", "0.5502004", "0.54921055", "0.53882295", "0.5386748", "0.53179926", "0.52953476", "0.5235253", "0.52319646", "0.5156149", "0.51293546", "0.5121426", "0.51195425", "0.5102308", "0.50859106", "0.5080095", "0.5077161", "0.5069672", "0.5065913", "0.50616634", "0.50505674", "0.50490266", "0.50470763", "0.50303894", "0.5008152", "0.5007032", "0.50036097", "0.4994816", "0.4991482", "0.49805817", "0.49732333", "0.49626377", "0.4935028", "0.49224484", "0.49096754", "0.49079385", "0.49037284", "0.49022117", "0.48962197", "0.4893455", "0.48848286", "0.48840523", "0.48710424", "0.48708034", "0.4864129", "0.48629266", "0.48629144", "0.48546374", "0.48541138", "0.48535305", "0.48489", "0.48260394", "0.48166516", "0.48067072", "0.48053795", "0.48026654", "0.48018587", "0.4795042", "0.47860715", "0.47831872", "0.47751924", "0.47666353", "0.47400394", "0.47343338", "0.4727515", "0.4727253", "0.4725935", "0.47216123", "0.47193182", "0.47132584", "0.47086507", "0.4707639", "0.47060636", "0.4697811", "0.4697322", "0.468513", "0.46846953", "0.46841764", "0.46831483", "0.46726447", "0.46635455", "0.46613717", "0.46564913", "0.46506986", "0.46504736", "0.4650437", "0.4648704", "0.464864", "0.4646137", "0.46377173", "0.46347225", "0.4632426", "0.4631897", "0.4631504", "0.4627899", "0.4627431", "0.46266973", "0.46219456", "0.46215898" ]
0.7520238
0
Returns True if both submodels have same length
def _validate_submodels(self, type_promax, type_ms): return type_promax in self._submodels and \ type_ms in self._submodels and \ len(self._submodels[type_promax]) > 0 and \ len(self._submodels[type_promax]) == len(self._submodels[type_ms])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n assert len(self.sent1) == len(self.sent2)\n return len(self.sent1)", "def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size", "def equal_size(self, other):\n if not isinstance(other, Matrix):\n raise ValueError(\"Can only compare two matrices\")\n return other.m == self.m and other.n == self.n", "def __len__(self):\n return max(self.A_size, self.B_size)", "def is_full(self) -> bool:", "def fits_into(self, other) -> bool:\n return other.width > self.width and other.height > self.height", "def equal_length(crime):\n length = len(crime[0])\n for i in range(len(crime)):\n boo = length == len(crime[i])\n if boo == False:\n return(False)\n break\n return(True)", "def is_full(self):", "def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True", "def __eq__(self, other):\n if isinstance(other, DenseUnit):\n return (Counter(self.dimension) == Counter(other.dimension) and Counter(self.points) == Counter(\n other.points))\n return False", "def subcontrary_with(self, other: 'Concept') -> bool:\n return (self._extent & other._extent\n and (self._extent | other._extent) == self.lattice.supremum._extent)", "def matches(self, other):\n return ( all([i==j or i<0 or j<0 for i, j in zip(self._data, other._data)])\n and len(self._data) == len(other._data) )", "def unitset_is_consistent(self, other: \"UnitSet\"):\n return all(getattr(self, q) is getattr(other, q) for q in self._base_quantities)", "def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result", "def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b", "def is_consistent(self, other):\n return self.name != other.name or self.type is other.type", "def is_subset(self, other):", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def cmp_sz(cls, first, second):\n return first.size == second.size", "def isFull(self):\n return self.rear == self.size", "def __len__(self):\n return len(self.first)", "def __eq__(self, other):\n\n for vert in self:\n if vert not in other:\n return False\n if len(self) == len(other):\n return True", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def test_SameNumberOfFields(self):\n pass", "def is_mutually_bit_compatible_with(self, other: 'CompositeType') -> bool:\n return self.bit_length_set == other.bit_length_set", "def compatible(self, other):\n return (hasattr(other, 'fft') and getattr(other, 'fft', np.array(\n [])).shape == self.fft.shape, super(cfft, self).compatible(other))", "def is_strict_subset(self, other):\n return self.is_subset(other) and self != other", "def attr_has_same_shape(first_obj, second_obj, attr):\n # Support legacy `lace.mesh.Mesh`, where attrs are set to `None` instead\n # of empty arrays.\n first, second = getattr(first_obj, attr), getattr(second_obj, attr)\n if first is None or second is None:\n return first is second\n else:\n return first.shape == second.shape", "def _is_equal_same_type(self, other):\n return True", "def __ge__(self, other: Schema) -> bool:\n return set(self.items()) >= 
set(other.items())", "def is_longer(dna1, dna2):\n return len(dna1)> len(dna2)", "def almost_equals(self, other):\n if self.__class__ is other.__class__ and len(self) == len(other):\n for a, b in zip(self, other):\n if not a.almost_equals(b):\n return False\n return True\n else:\n return False", "def is_full(self):\n return set(self._parent.letters()) == set(self.winners())", "def __eq__(self, other):\n s = len(self)\n r = len(other)\n\n if s != r:\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n # Two vectors are numericaly the same if the difference\n # between both of them are smaller than given precisao\n for i in range(s):\n if not comozero(self[i] - other[i]):\n return False\n\n return True", "def same_length(*args):\n if len({len(word) for word in args}) <= 1:\n return True\n return False", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def verifyModels(self):\r\n\r\n #\r\n # now check that all models have the same poly data in the\r\n # model node as in the display node\r\n #\r\n polyDataInScene = []\r\n fileNamesInScene = []\r\n success = True\r\n numModels = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" )\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n polyDataInScene.append(modelNode.GetPolyData())\r\n for dn in range(modelNode.GetNumberOfDisplayNodes()):\r\n displayNode = modelNode.GetNthDisplayNode(dn)\r\n if modelNode.GetPolyData() != displayNode.GetInputPolyData():\r\n self.delayDisplay(\"Model %d does not match its display node %d! (name: %s, ids: %s and %s)\" % (n,dn,modelNode.GetName(), modelNode.GetID(),displayNode.GetID()))\r\n success = False\r\n for sn in range(modelNode.GetNumberOfStorageNodes()):\r\n storageNode = modelNode.GetNthStorageNode(sn)\r\n fileName = storageNode.GetFileName()\r\n fileNamesInScene.append(fileName)\r\n if fileName in fileNamesInScene:\r\n self.delayDisplay(\"Model %d has duplicate file name %s! (ids: %s and %s)\" % (n,fileName,modelNode.GetID(),storageNode.GetID()))\r\n success = False\r\n\r\n\r\n #\r\n # now check that each model has a unique polydata\r\n #\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n if polyDataInScene.count(modelNode.GetPolyData()) > 1:\r\n self.delayDisplay(\"Polydata for Model is duplicated! 
(id: %s and %s)\" % (n,modelNode.GetID()))\r\n success = False\r\n\r\n return success", "def __eq__(self, other):\n \n if isinstance(other, Atlas):\n return self.size == other.size\n\n return False", "def __len__(self):\n return len(self.__parent)", "def empty(self):\n return len(self.a) + len(self.b) == 0", "def isFull(self) -> bool:\n return self.size == self.maxlen", "def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())", "def test_candidatesubr_len(self):\n\n self.assertEqual(len(self.cand_subr), 3)", "def valid_merge( group1, group2, target_dir, max_list_size, split_toplevel=True ):\n if len( group1 ) <= 0 or len( group2 ) <= 0:\n return True\n if ( lsize( group1 ) + lsize( group2 ) ) <= max_list_size:\n return ( not split_toplevel ) or toplevel_subdir( group1[0].path, target_dir ) == toplevel_subdir( group2[0].path, target_dir )\n return False", "def __eq__(self: 'TOAHModel', other: 'TOAHModel') -> bool:\n return self.stool_lst == other.stool_lst", "def conformability(self, other):\r\n if self.columns == other.rows:\r\n return True\r\n else:\r\n return False", "def isSubset(self, other):\n for val, freq in self.items():\n if freq > other.freq(val):\n return False\n return True", "def is_full(self):\n return self.idx == self.len", "def is_similar_with(self, other):\n\n # corresponding angles are congruent\n if self.angles != other.angles:\n return False\n # corresponding sides are proportional\n proportion = self.perimeter() / other.perimeter()\n for i in range(len(self.lengths)):\n if self.lengths[i]/other.lengths[i] != proportion:\n return False\n return True", "def filled(self):\n return len(self) == self.length", "def is_strict_superset(self, other):\n return self.is_superset(other) and self != other", "def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions", "def _compare_length(node1, node2):\n if node1.length is None and node2.length is None:\n return True\n elif (node1.length is None) ^ (node2.length is None):\n return False\n elif isclose(node1.length, node2.length) is False:\n return False\n return True", "def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res", "def check_lengths(self, length: Expr) -> bool:\n for point1 in self.points:\n for point2 in self.points - {point1}:\n if abs(point2 - point1) == length:\n print(f'Length {length} found between points: {point1} and {point2}')\n return True\n return False", "def table_contains_elements(self, model):\n if model.objects.count() > 1:\n return True\n return False", "def is_equal(self, other) -> bool:\n if isinstance(other, numbers.Number):\n return not self.num_variables and bool(self.offset == other)\n # todo: performance\n\n try:\n if callable(other.vartype):\n vartype_eq = all(self.vartype(v) == other.vartype(v) for v in self.variables)\n else:\n vartype_eq = all(self.vartype(v) == other.vartype for v in self.variables)\n\n return (vartype_eq\n and self.shape == other.shape # redundant, fast to check\n and self.offset == other.offset\n and self.linear == other.linear\n and self.adj == other.adj)\n except AttributeError:\n return False", "def empty(self):\r\n if len(self.s1)==0 and 
len(self.s2)==0:\r\n return True", "def __len__(self):\n return len(self[0]) + len(self[1])", "def is_proper_subset(self, other):\n if not isinstance(other, SetPy):\n raise TypeError(\"Can only be proper subset of another SetPy\")\n return self.is_subset(other) and not self == other", "def is_full(self):\n return len(self.keys) > self.m", "def does_contain(self, other):\n if len(self.reactants) != len(other.retrons):\n return False\n for mols in itertools.permutations(self.reactants):\n if all(m.HasSubstructMatch(p)\n for m, p in zip(mols, other.retrons)):\n return True\n return False", "def is_full(self):\n\n return self.count == len(self.array)", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def __len__(self):\n return 1", "def _values_is_equal(self, a, b):\n if a is None or b is None:\n return False\n\n a = a.split()\n b = b.split()\n\n if len(a) != len(b):\n return False\n\n return len([i for i, j in zip(a, b) if i == j]) == len(a)", "def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def compare(self, other):\n return len(self & other) / max(len(self | other), 1)", "def sizeIsVlen(self):\n return self.rank > 0 and 'vlen' in self.tags", "def _only_one_type(self):\n num_larger_than_1 = 0\n for symb, indices in self.atoms_indx.items():\n if len(indices) > 0:\n num_larger_than_1 += 1\n return num_larger_than_1 <= 1", "def __gt__(self, other: Schema) -> bool:\n return set(self.items()) > set(other.items())", "def __len__(self):\n return 2", "def __len__(self):\n return 2", "def __eq__(self, other):\n return isinstance(other, Bag) and Counter(self.items) == Counter(other.items)", "def _assert_same_length(\n list_series_1: Sequence[TimeSeries],\n list_series_2: Sequence[TimeSeries],\n):\n\n raise_if_not(\n len(list_series_1) == len(list_series_2),\n \"Sequences of series must be of the same length, found length:\"\n + f\" {len(list_series_1)} and {len(list_series_2)}.\",\n )", "def is_full(self):\n return len(self._data) == 1", "def __eq__(self, other: 'ModelParameters') -> bool:\n if not isinstance(other, ModelParameters) or len(self) != len(other):\n return False\n else:\n return all(torch.equal(p_self, p_other) for p_self, p_other in zip(self.parameters, other.parameters))", "def check_if_full(self):\n pass", "def e_paralelo(self, other):\n if (self == other) or (self.normaliza() == other.normaliza()):\n return True\n else:\n return False", "def equal_levels(self, other: MultiIndex) -> bool:\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True", "def __len__(self) :\n\n return self.len_all() // settings.WORLD_SIZE", "def is_full(self):\n for i in range(self.width):\n if self.can_add_to(i) == True:\n return False\n return True", "def is_full(self):\n return len(self.keys) == self.order", "def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children", "def __len__(self):\n return len(self.beams)", "def is_subtype_of(self, other):\n if type(self) is not type(other):\n return False\n\n if (not self._transform_is_composite and\n self.transform_or_spec != other.transform_or_spec):\n return False\n\n # pylint: disable=protected-access\n try:\n tf.nest.assert_same_structure((self._specs, self._unique_id_params),\n 
(other._specs, other._unique_id_params))\n except (TypeError, ValueError):\n return False\n\n self_elements = tf.nest.flatten((self._specs, self._unique_id_params))\n other_elements = tf.nest.flatten((other._specs, other._unique_id_params))\n\n def is_subtype_or_equal(a, b):\n try:\n return a.is_subtype_of(b)\n except AttributeError:\n return a == b\n\n return all(\n is_subtype_or_equal(self_element, other_element)\n for (self_element, other_element) in zip(self_elements, other_elements))", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def is_equal(a: list[int], b: list[int]) -> bool:\n a_length: int = len(a)\n b_length: int = len(b)\n if a_length == 0 and b_length == 0:\n return True\n else:\n i = 0\n if a_length == b_length:\n if a_length <= len(b):\n while i < a_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n while i < b_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n return False", "def compatible(self, other):\n compatible = ((self.ndim == other.ndim) and\n self.regular and\n other.regular and\n all(numpy.allclose(sw[0], ow[0]) for (sw, ow) in\n zip(self.binwidths, other.binwidths)))\n return compatible", "def __eq__(self: 'Cheese', other: 'Cheese') -> bool:\n return isinstance(other, Cheese) and self.size == other.size", "def equal(self, second_bag: object) -> bool:\n if self.da.length() == second_bag.da.length():\n counter = 0\n try:\n newArr = StaticArray(self.da.length())\n newArr1 = StaticArray(second_bag.da.length())\n except StaticArrayException:\n return True\n for i in range(self.da.length()):\n newArr[i] = self.da[i]\n for j in range(self.da.length()):\n newArr1[j] = second_bag.da[j]\n for i in range(newArr.size()):\n for j in range(newArr1.size()):\n if newArr[i] == newArr1[j]:\n counter += 1\n newArr[i] = \"Not Equivalent\"\n newArr1[j] = \"equivalence\"\n if counter == self.da.length():\n return True\n else:\n return False\n else:\n return False", "def is_full(self) -> bool:\n pass", "def test_are_duplicates_length(self):\n rules = [\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1), \"Class\": \"apple\"},\n name=1),\n pd.Series({\"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1),\n \"Class\": \"apple\"}, name=2)\n ]\n duplicate = _are_duplicates(rules[0], rules[1])\n self.assertTrue(duplicate is False)", "def check_children_eq_parent(self):\n\t\tif len(self.tree.children) == 0:\n\t\t\treturn\n\n\t\tchild_count = 0.0\n\t\tfor child in self.tree.children:\n\t\t\tchild_count += child.utility.count\n\t\tassert self.utility.count == child_count", "def isFull(self) -> bool:\n return self._elems == self._k", "def full(self):\n return self.size >= self.maxsize", "def check_subarray(array1, array2):\r\n \r\n # check assumption\r\n if (len(array2.shape) != 1) or (array2.shape[0] != array1.shape[-1]):\r\n raise ValueError('Attempting to check for subarray equality when shape assumption does not hold.')\r\n \r\n return np.all(array1==array2, axis=-1)", "def __eq__(self, other):\n if self.get_dimensions() == other.get_dimensions():\n is_equal = (np.allclose(self.lon_arr, other.lon_arr) and\n np.allclose(self.lat_arr, other.lat_arr))\n else:\n is_equal = False\n return is_equal", "def have_same_shapes(array1, array2):\n return array1.shape == array2.shape", 
"def are_equal(self, sp1, sp2):\n return True" ]
[ "0.62707317", "0.62434", "0.6213933", "0.6097161", "0.6058098", "0.6052282", "0.6001739", "0.5992692", "0.59679824", "0.5957825", "0.595145", "0.59088415", "0.5906478", "0.5892642", "0.58872175", "0.5876356", "0.58497226", "0.58374107", "0.5794245", "0.57578903", "0.575601", "0.5755663", "0.57517475", "0.57508075", "0.5742354", "0.5742036", "0.5738218", "0.5725914", "0.57186526", "0.57062787", "0.5703994", "0.5698863", "0.5692614", "0.56882614", "0.5687171", "0.5686475", "0.568643", "0.56860673", "0.56776077", "0.56656426", "0.56602037", "0.5658893", "0.5654277", "0.56466854", "0.5638782", "0.56364584", "0.5628875", "0.56265724", "0.56214285", "0.5616368", "0.5610479", "0.56099355", "0.56043476", "0.5598508", "0.55924004", "0.5588522", "0.55726624", "0.5569539", "0.5567077", "0.556272", "0.55577874", "0.5557664", "0.55565983", "0.555144", "0.55503464", "0.55494153", "0.55461556", "0.5544599", "0.55396795", "0.553842", "0.55383766", "0.5537943", "0.5537943", "0.55264074", "0.5524441", "0.5516595", "0.55139744", "0.55138886", "0.55111104", "0.5510285", "0.55094117", "0.5509307", "0.5508582", "0.5508254", "0.550795", "0.5505192", "0.5497846", "0.54974973", "0.5495655", "0.54930633", "0.5490536", "0.54876196", "0.54802233", "0.5475036", "0.54740876", "0.5469156", "0.54684514", "0.54676497", "0.5466889", "0.5461755" ]
0.71253115
0
Validates type, returning ("normalized_type", submodel_fields)
def _validate_type(self, tp: str, name: str = None): if tp is None: return None, None fields = None if tp.startswith('{'): # Submodel defined in JSON fields = parse_json_model(tp, modelname=name) if not fields: return None, None return snake_to_camel(name), fields normal_type = get_type_from_str(tp) if normal_type != "None": tp = normal_type return tp, fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n self.assertTrue(isinstance(self.UserInfo.age, int))", "def validate(self, value):\n value = super(Type,self).validate(value)\n if self.type is None:\n return value\n if value is not None and not isinstance(value,self.type):\n try:\n if isinstance(value, list) or isinstance(value, tuple): value = self.type(*value)\n elif isinstance(value, dict): value = self.type(**value)\n else: value = self.type(value)\n except: \n raise BadValueError(\"Cannot coerce: %s to %s\"% (value, self.type))\n return value", "def _validate_submodels(self, type_promax, type_ms):\n return type_promax in self._submodels and \\\n type_ms in self._submodels and \\\n len(self._submodels[type_promax]) > 0 and \\\n len(self._submodels[type_promax]) == len(self._submodels[type_ms])", "def validate(self, types):\n known = set(self.fields.keys())\n for name, definition in types.items():\n for field, schema in definition.items():\n field_kind = schema.get('kind')\n if field_kind == 'list':\n items = schema.get('items')\n if not items:\n raise(ValidationError(\n 'items is not found in {name}.{field}'.format(\n name=name, field=field\n )\n ))\n if items not in known:\n fail_field(items, field, name)\n continue\n if field_kind not in known:\n fail_field(field_kind, field, name)\n known.add(name)\n return types", "def _type_validator(self, type=None):\n if type not in ['agents', 'users', 'groups']:\n type = 'users'\n return type", "def validate_model(self) -> Tuple[T_co, T_co]:\n raise NotImplementedError", "def local_type(verifield, type_name):\n from polyglot.pyapi.meta import retrieve_schema_table_fields\n from polyglot.pyapi.instance import create_instance_validators\n from polyglot.models.schema import Instance\n (tenant_id, schema_id, table_id) = type_name.split(\"::\")\n fields = retrieve_schema_table_fields(tenant_id, schema_id, table_id)\n validators = Instance._validations\n validators['instance_data'] = create_instance_validators(fields)\n instance = Instance(**instance)\n instance.validate(validators)\n instance._validations = validators\n return not((hasattr(instance, 'validation_errors') \n and instance.validation_errors) \\\n or instance.instance_data.get('validation_errors', {}))", "def validate(self):\n fieldnames = self.positional_fieldnames + self.tagnames\n if self.vlevel == 0:\n self._validate_tagnames_and_types()\n for fieldname in fieldnames:\n self.validate_field(fieldname)\n self._validate_record_type_specific_info()", "def validate(self):\n self._check_type()", "def _ValidateType(self, local_field_names, require_guid):\n # Make sure the typename is non-empty.\n if not self.typename:\n self.AddFinding(findings_lib.MissingTypenameError(self))\n elif not isinstance(self.typename, str):\n self.AddFinding(\n findings_lib.IllegalKeyTypeError(self.typename, self.file_context))\n elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):\n self.AddFinding(\n findings_lib.InvalidTypenameError(self.typename, self.file_context))\n\n # Check for correct GUID format.\n if self.guid is not None and not ENTITY_TYPE_GUID_PATTERN.match(self.guid):\n self.AddFinding(findings_lib.InvalidTypeGuidError(self))\n if require_guid and self.guid is None:\n self.AddFinding(findings_lib.MissingTypeGuidError(self))\n\n # Passthrough types cannot be inherited, so make sure they are not defined\n 
# as abstract.\n if self.allow_undefined_fields and self.is_abstract:\n self.AddFinding(findings_lib.AbstractPassthroughTypeError(self))\n # Make sure the type description is non-empty.\n if not self.description:\n self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))\n\n # Check for duplicate local fields.\n # this check is case insensitive to catch dupes earlier in the event that\n # we stop explicitly rejecting upper case characters\n check_fields = set()\n for field in local_field_names:\n field_lower = field.lower()\n if field_lower in check_fields:\n self.AddFinding(findings_lib.DuplicateFieldError(self, field))\n continue\n check_fields.add(field_lower)\n\n # TODO(berkoben): Add more checks to validate fields in isolation\n # (in case we don't have a field set to check against)\n # (i.e. check for chirality, formatting. Could use actual Field objects)\n\n # Check formatting of field name\n if len(field.split('/')) > 2:\n self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))\n\n # Check for duplicate parent names.\n parent_names_check = set()\n for parent_name in self.unqualified_parent_names:\n if parent_name in parent_names_check:\n self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))\n continue\n parent_names_check.add(parent_name)\n\n # Check formatting of parent name\n if len(parent_name.split('/')) > 2:\n self.AddFinding(\n findings_lib.UnrecognizedParentFormatError(self, parent_name))\n\n # Enforce that the inherited_fields_expanded field is not set\n if self.inherited_fields_expanded:\n self.AddFinding(findings_lib.InheritedFieldsSetError(self))", "def get_sub_serializer_errors(self, serializer, error_type):\n serializer.initial_data = self.initial_data.get(error_type)\n if hasattr(serializer, 'get_subclass'): # todo: check\n try:\n # try to get specialized serializer, when InheritanceModelSerializer\n # it can fail, when 'type' is missing or incorrect\n serializer = serializer.get_subclass()\n except:\n pass\n # Here is recursion, again calling DRF-friendly-errors for sub-serializer:\n serializer.is_valid()\n return serializer.errors", "def validate_type_definition(type_definition):\n # TODO:validator\n data_type = type_definition.get('data_type')\n validator = type_definition.get('validator')\n return type_definition", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def get_check_types():", "def validate_type(self, validated_data):\n valid_types = ('international', 'local')\n if validated_data in valid_types:\n return validated_data\n\n raise_error(serialization_errors['invalid_flight_type'],\n raise_only_message=True)", "def _deserialize(\n self, value: Any, attr: str = None, data: Mapping[str, Any] = None, **kwargs\n ):\n errors = []\n # iterate through the types being passed into UnionField via val_types\n for field in self.valid_types:\n try:\n # inherit deserialize method from Fields class\n return field.deserialize(value, attr, data, **kwargs)\n # if error, add error message to error list\n except ValidationError as error:\n errors.append(error.messages)\n raise ValidationError(errors)", "def data_type_derived_from_validator(field, presentation, context):\n\n if _data_type_validator(field, presentation, context):\n # Validate derivation only if a complex data type (primitive types have no derivation\n # hierarchy)\n _data_type_derived_from_validator(field, presentation, context)", "def clean_typedField(self):\n data = 
self.cleaned_data['typedField']\n\n # Remove whitespace from each line\n cleanedData = \"\"\n for line in data.splitlines():\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n cleanedData += stripped\n cleanedData += \"\\n\"\n\n # TODO: Settings in the Models for which validators to use\n\n # Validation 0 : Insure there is some data present\n if not len(cleanedData):\n raise forms.ValidationError('No data present')\n\n # Validation 1 : Insure the data is binary only\n okChars = '01?'\n for line in cleanedData.splitlines():\n allOk = all(c in okChars for c in line)\n if not allOk:\n raise forms.ValidationError('Invalid characters entered (just 0, 1, and ? please)')\n\n # Validation 2 : Insure the data is the proper number of bits in all dimensions\n colCount = 0\n rowCount = len(cleanedData.splitlines())\n bitWidth = self.instance.dieImage.bitWidth\n bitHeight = self.instance.dieImage.bitHeight\n if rowCount != bitHeight:\n raise forms.ValidationError('You gave me %d rows of typed bits but I need %d' % (rowCount, bitHeight))\n for linei, line in enumerate(cleanedData.splitlines()):\n lineLen = len(line.strip())\n if lineLen != bitWidth:\n raise forms.ValidationError('Row %d has %d bits but should have %d bits' % (linei + 1, lineLen, bitWidth))\n\n return cleanedData", "def _check_type_get_basemodels(t: type) -> List[Type[BaseModel]]:\n result = []\n if typing_inspect.is_union_type(t):\n for tt in typing_inspect.get_args(t):\n if not issubclass(tt, BaseModel):\n raise TypeError(f\"Args of Union must be BaseModels, {t} not.\")\n result.append(tt)\n else:\n try:\n if issubclass(t, BaseModel): result.append(t)\n except TypeError:\n pass\n\n from pymotyc import MotycModel\n for tt in [*result]:\n for parent in tt.__mro__:\n if parent in [MotycModel, BaseModel]: break\n if parent not in result: result.append(parent)\n\n if not result: raise TypeError(f\"Improper type {t} of the Collection.\")\n\n return result", "def test_ensure_valid_model_type(self):\n # Note the \"valid\" type strings for our test\n test_types = [\"bar\", \"foo\", \"Sreeta\", \"Feras\"]\n # Note a set of invalid type strings for the test\n bad_types = [\"Tim\", \"Sam\"]\n\n # Alias the function to be tested\n func = pylogit.pylogit.ensure_valid_model_type\n\n # Make note of part of the error message that should be raised\n partial_error_msg = \"The specified model_type was not valid.\"\n\n # Perform the requisite tests\n for good_example in test_types:\n self.assertIsNone(func(good_example, test_types))\n for bad_example in bad_types:\n self.assertRaisesRegexp(ValueError,\n partial_error_msg,\n func,\n bad_example,\n test_types)\n\n return None", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})", "def validate_type(in_type: Mapping[str, Any]) -> TypeDescription:\n # Check if all keys are there\n missing_keys = {\"name\", \"weights\", \"access\"}.difference(in_type.keys())\n if missing_keys:\n missing_keys = \", \".join(repr(i) for i in sorted(missing_keys))\n raise InvalidTypeDescription(\"Provided type description is missing some keys: \"\n + missing_keys)\n\n # Check in_type['name']\n if not isinstance(in_type[\"name\"], str):\n got_type = type(in_type[\"name\"]).__name__\n raise InvalidTypeDescription(f\"Value of type['name'] should be str, not {got_type}\")\n\n # Check in_type['weights']\n if not isinstance(in_type[\"weights\"], collections.abc.Mapping):\n got_type = type(in_type[\"weights\"]).__name__\n raise 
InvalidTypeDescription(\n \"Value of type['weights'] should be an instance of collections.abc.Mapping \"\n f\"(e.g. a dict), but {got_type} was provided\"\n )\n\n # Check in_type['access']\n if not isinstance(in_type[\"access\"], collections.abc.Sequence):\n got_type = type(in_type[\"access\"]).__name__\n raise InvalidTypeDescription(\n \"Value of type['access'] should be an instance of collections.abc.Sequence \"\n f\"(e.g. a list), but {got_type} was provided\"\n )\n\n return in_type # type: ignore", "def validate_item(self, form_item, type_):\n if form_item == \"\":\n return None\n else:\n try:\n return type_(form_item)\n except TypeError:\n return None", "def analyze_type(self, tipo):\n\n if not self.c_types:\n return self._validate_built_in(tipo)\n elif tipo in self.c_types or self._match_array(tipo, self.c_array_types):\n return self._validate_source(tipo)\n else:\n return self._validate_built_in(tipo)", "def canonical_etype(self): # -> tuple[None, None, None]:\n ...", "def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]:\n sub_types_list = []\n for valid_compositional_type in (\n SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES\n ):\n if compositional_type.startswith(valid_compositional_type):\n inside_string = compositional_type[\n compositional_type.index(\"[\") + 1 : compositional_type.rindex(\"]\")\n ].strip()\n while inside_string != \"\":\n do_not_add = False\n if inside_string.find(\",\") == -1: # No comma; this is the last sub-type\n provisional_sub_type = inside_string.strip()\n if (\n provisional_sub_type == \"...\"\n ): # The sub-string is ... used for Tuple, e.g. Tuple[int, ...]\n do_not_add = True\n else:\n sub_type = provisional_sub_type\n inside_string = \"\"\n else: # There is a comma; this MAY not be the last sub-type\n sub_string_until_comma = inside_string[\n : inside_string.index(\",\")\n ].strip()\n if (\n sub_string_until_comma.find(\"[\") == -1\n ): # No open brackets; this is a primitive type and NOT the last sub-type\n sub_type = sub_string_until_comma\n inside_string = inside_string[\n inside_string.index(\",\") + 1 :\n ].strip()\n else: # There is an open bracket'['; this is a compositional type\n try:\n closing_bracket_index = _match_brackets(\n inside_string, inside_string.index(\"[\")\n )\n except SyntaxError:\n raise SyntaxError(\n \"Bad formatting. 
No matching close bracket ']' for the open bracket at {}\".format(\n inside_string[\n : inside_string.index(\"[\") + 1\n ].strip()\n )\n )\n sub_type = inside_string[: closing_bracket_index + 1].strip()\n the_rest_of_inside_string = inside_string[\n closing_bracket_index + 1 :\n ].strip()\n if (\n the_rest_of_inside_string.find(\",\") == -1\n ): # No comma; this is the last sub-type\n inside_string = the_rest_of_inside_string.strip()\n else: # There is a comma; this is not the last sub-type\n inside_string = the_rest_of_inside_string[\n the_rest_of_inside_string.index(\",\") + 1 :\n ].strip()\n if not do_not_add:\n sub_types_list.append(sub_type)\n return tuple(sub_types_list)\n raise SyntaxError(\n \"{} is not a valid compositional type.\".format(compositional_type)\n )", "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get(\"inputFields\") or [] if i]\n self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get(\"interfaces\") or [] if i]\n self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get(\"enumValues\") or [] if e]\n self.possible_types = raw_type.get(\"possibleTypes\")", "def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n self.assertIsInstance(getattr(item, name), type_)", "def process_sub_serializer_errors(self, serializer_error_dict, error_type):\n sub_serializer_errors = serializer_error_dict.get('errors', [])\n sub_serializer_non_field_errors = serializer_error_dict.get('non_field_errors', None)\n result = []\n for sub_error in sub_serializer_errors:\n if sub_error['field'] is None:\n sub_error['field'] = error_type\n result.append(sub_error)\n if sub_serializer_non_field_errors is not None:\n result.extend(\n self.get_non_field_error_entries(sub_serializer_non_field_errors)\n )\n return result", "def build_standard_field(self, field_name, model_field_type):\n field_mapping = self.serializer_field_mapping\n field_class = field_mapping[model_field_type]\n field_kwargs = get_field_kwargs(field_name, model_field_type)\n\n if \"choices\" in field_kwargs:\n # Fields with choices get coerced into `ChoiceField`\n # instead of using their regular typed field.\n field_class = self.serializer_choice_field\n # Some model fields may introduce kwargs that would not be valid\n # for the choice field. We need to strip these out.\n # Eg. 
models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)\n valid_kwargs = {\n \"read_only\",\n \"write_only\",\n \"required\",\n \"default\",\n \"initial\",\n \"source\",\n \"label\",\n \"help_text\",\n \"style\",\n \"error_messages\",\n \"validators\",\n \"allow_null\",\n \"allow_blank\",\n \"choices\",\n }\n for key in list(field_kwargs):\n if key not in valid_kwargs:\n field_kwargs.pop(key)\n\n if not issubclass(field_class, fields.CharField) and not issubclass(\n field_class, fields.ChoiceField\n ):\n # `allow_blank` is only valid for textual fields.\n field_kwargs.pop(\"allow_blank\", None)\n\n return field_class, field_kwargs", "def validate_filter_settings(cls, extra_fields: dict) -> tuple[bool, str | None]:\n if cls.extra_fields_type is None:\n return True, None\n\n try:\n cls.extra_fields_type(**extra_fields)\n except ValidationError as e:\n return False, repr(e)\n else:\n return True, None", "def check_type(self):\n return True", "def _get_key_subtype(field_meta):\n field_type = field_meta['type']\n\n if field_type == 'categorical':\n field_subtype = 'string'\n\n elif field_type in ('numerical', 'id'):\n field_subtype = field_meta['subtype']\n if field_subtype not in ('integer', 'string'):\n raise ValueError(\n 'Invalid field \"subtype\" for key field: \"{}\"'.format(field_subtype)\n )\n\n else:\n raise ValueError(\n 'Invalid field \"type\" for key field: \"{}\"'.format(field_type)\n )\n\n return field_subtype", "def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )", "def validate_fields(cls, message_type: str, attachment_data: dict) -> None:", "def get_expected_type(self) -> Type[Any]:\n types = get_type_hints(self.model.record)\n\n if self.pivot:\n return Dict[types[self.pivot], types[self.field]] # type: ignore\n\n return List[types[self.field]] # type: ignore", "def _validate_fields(self, change_fields):\n pass", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))", "def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n 
(type(fields[key]), key, type(schema[key])))", "def _validate_nested_list_type(self, name, obj, nested_level, *args):\n if nested_level <= 1:\n self._validate_list_type(name, obj, *args)\n else:\n if obj is None:\n return\n if not isinstance(obj, list):\n raise TypeError(self.__class__.__name__ + '.' + name + ' contains value of type ' +\n type(obj).__name__ + ' where a list is expected')\n for sub_obj in obj:\n self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)", "def validate(t):\n return _dispatcher[type(t)](t)", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestListModel.create(int_list=['string', True], text_list=[1, 3.0])", "def test_validate_type_ok(self, field_type, value):\n opt = scheme.Option('test-option', field_type=field_type)\n opt.validate('foo', value)", "def _assert_input_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, t in type1.fields.iteritems():\n self.assertEqual(t.type_str(), type2.fields[name].type_str())", "def __match_info_with_type(self, info: SheetInfo):\n if info.info_type is str:\n return QLineEdit()\n elif info.info_type is bool:\n return QCheckBox()\n else:\n raise TypeError(\"There is no matched GUI type for \" + str(info.info_type))", "def test_types(question):\n instance = question[\"instance\"]\n for name, data in instance.get(\"variables\", {}).items():\n assert \"optional\" not in data or isinstance(data[\"optional\"], bool)\n if data.get(\"type\") == \"boolean\":\n assert \"value\" not in data or isinstance(data[\"value\"], bool)\n elif data.get(\"type\") in [\"integer\", \"long\"]:\n assert \"value\" not in data or isinstance(data[\"value\"], int)", "def init_value(self, value, strict: bool = True):\n value = super().init_value(value)\n if isinstance(self.data_type, MetaModel):\n t = self.data_type.get_class()\n elif issubclass(self.data_type, SimpleField):\n t = self.data_type.static_field_type\n else:\n raise MetaTypeException(f'The data_type must be either a '\n f'SimpleField or a MetaModel instance, not '\n f'a {type(self.data_type)}.')\n for el in value:\n # noinspection PyTypeHints\n if not isinstance(el, t):\n raise ModelInitException(f'The type of the {el} is not {t}')\n return value", "def get_field_type(connection, table_name, row):\n field_params = OrderedDict()\n field_notes = []\n is_geometry = False\n try:\n field_type = connection.introspection.get_field_type(row[1], row)\n except KeyError:\n field_type = 'TextField'\n field_notes.append('This field type is a guess.')\n\n # This is a hook for data_types_reverse to return a tuple of\n # (field_type, field_params_dict).\n if type(field_type) is tuple:\n field_type, new_params = field_type\n field_params.update(new_params)\n\n # Add max_length for all CharFields.\n if field_type == 'CharField' and row[3]:\n field_params['max_length'] = int(row[3])\n\n if field_type == 'DecimalField':\n if row[4] is None or row[5] is None:\n field_notes.append(\n 'max_digits and decimal_places have been guessed, as this '\n 'database handles decimal fields as float')\n field_params['max_digits'] = row[4] if row[4] is not None else 10\n field_params['decimal_places'] = row[\n 5] if row[5] is not None else 5\n else:\n field_params['max_digits'] = row[4]\n field_params['decimal_places'] = row[5]\n\n if field_type == 'GeometryField':\n geo_col = row[0]\n # Getting a more 
specific field type and any additional parameters\n # from the `get_geometry_type` routine for the spatial backend.\n field_type, geo_params = connection.introspection.get_geometry_type(\n table_name, geo_col)\n field_params.update(geo_params)\n is_geometry = True\n\n return field_type, field_params, is_geometry\n # return getattr(models.fields, field_type), field_params", "def _add_type_specific_repr_fields(self, repr_parts):", "def expected_form(self) -> str:\n return self.get_type()", "def CheckType(self, *args, **kwargs):\n pass", "def get_model_type(self):\n pass", "def _validate_value_type_for_field(accepted_types: Tuple[Type[Any], ...], value: Any) -> None:\n if value is not None and not isinstance(value, accepted_types):\n pretty_types = \" ,\".join(\"'\" + accepted_type.__name__ + \"'\" for accepted_type in accepted_types)\n raise FieldValueValidationError(\n f\"Value '{value}' has invalid type '{value.__class__.__name__}'. Allowed types are: {pretty_types}\"\n )", "def validVarConstructType(self,vartype):\r\n indArray = vartype.find('[]')\r\n if indArray>0:\r\n thisType = vartype[0:indArray]\r\n isArray = True\r\n else:\r\n thisType = vartype\r\n isArray = False\r\n \r\n if thisType in ('rng','range'):\r\n type = 'range'\r\n elif thisType in ('rate'):\r\n type = 'rate'\r\n elif thisType in ('amt','amount'):\r\n type = 'amount'\r\n elif thisType in ('minamt','minamount'):\r\n type = 'minamount'\r\n elif thisType in ('bool'):\r\n type = 'bool'\r\n else:\r\n print 'variable type must be range, rate, amount, minamount, bool (or abbreviated forms)'\r\n return False, ''\r\n \r\n return True, type, isArray", "def fields_validator():\n\n return validator.BrewerySchema()", "def test_type(self):\n b = BaseModel()\n self.assertIsInstance(self.my_model.id, str)\n self.assertIsInstance(self.my_model.name, str)\n self.assertIsInstance(self.my_model.my_number, int)\n self.assertEqual(type(self.my_new_model.created_at), datetime)", "def _check_structure(input_dict, mandatory, model):\n\n # Check to see if the input dictionary has the keys for the mandatory metadata structure.\n for key, value in mandatory.items():\n if 'custom_fields' in input_dict:\n if key not in input_dict and key not in input_dict['custom_fields']:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n else:\n if key not in input_dict:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n # Check to see if the input dictionary has keys that are wrong.\n for key, value in input_dict.items():\n # Checks to see if keys of input dictionary are in the model dictionary.\n if key != 'custom_fields':\n if key not in model:\n raise ValueError('Unknown input dictionary key: {key}.'.format(key=key))\n\n # If the model dictionary key value is a list check to see if value in list are correct type.\n if type(value) is list:\n if type(value[0]) is not model[key][0]:\n err_message = 'input dictionary key: {ky} list type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=value[0], ref=model[key][0])\n raise ValueError(err_message)\n\n else:\n # Checks to see if the type of the value for key is correct, in comparison to the model dictionary.\n if type(value) is not model[key]:\n err_message = 'input dictionary key: {ky} type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=type(value), ref=model[key])\n raise ValueError(err_message)\n return True", "def data_type_properties_validator(field, presentation, context):\n\n 
field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if values is not None:\n if presentation._get_primitive_ancestor(context) is not None:\n context.validation.report(\n u'data type \"{0}\" defines properties even though it has a primitive ancestor'\n .format(presentation._fullname),\n locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)", "def _process_validator_results(ret, level, object_data, obj):\n\n # The first object in the tuple is the one being validated\n if isinstance(obj, tuple):\n real_obj = obj[0]\n else:\n real_obj = obj\n\n if not ret:\n is_valid = True\n return is_valid\n\n if isinstance(ret, string_types):\n ledger.add_message(ret, level, object_data)\n is_valid = False\n\n elif isinstance(ret, dict):\n for field_name, error in list(ret.items()):\n # verbose_field_name = ledger.map_field_name_to_verbose_name(obj, field_name)\n object_data_with_field = object_data.copy()\n object_data_with_field[\"field\"] = field_name\n if field_name_mapper is None:\n # raise RuntimeError(\"A field_name_mapper was not supplied to this validator.\")\n verbose_name = None\n else:\n verbose_name = field_name_mapper(real_obj, field_name)\n if verbose_name is None:\n from titlecase import titlecase\n\n verbose_name = titlecase(\" \".join(field_name.split(\"_\")))\n\n object_data_with_field[\"verbose_name\"] = verbose_name\n if include_field_name_in_message:\n error = \"{}: {}\".format(verbose_name, error)\n else:\n error = \"{}\".format(error)\n ledger.add_message(error, level, object_data_with_field)\n is_valid = False\n\n else:\n for validator_ret_item in ret:\n if isinstance(validator_ret_item, str):\n ledger.add_message(validator_ret_item, level, object_data)\n is_valid = False\n elif isinstance(validator_ret_item, dict):\n for field_name, error in list(validator_ret_item.items()):\n # verbose_field_name = ledger.map_field_name_to_verbose_name(obj, field_name)\n object_data_with_field = object_data.copy()\n object_data_with_field[\"field\"] = field_name\n verbose_name = field_name_mapper(real_obj, field_name)\n if verbose_name is None:\n from titlecase import titlecase\n\n verbose_name = titlecase(\" \".join(field_name.split(\"_\")))\n\n object_data_with_field[\"verbose_name\"] = verbose_name\n if include_field_name_in_message:\n error = \"{}: {}\".format(verbose_name, error)\n else:\n error = \"{}\".format(error)\n \n ledger.add_message(error, level, object_data_with_field)\n is_valid = False\n\n return is_valid", "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. 
This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def _get_type_to_one_of():\n\n return {\n 'primitive': Settings._is_in_prim,\n 'list': Settings._is_sublist_in_one_of_lists,\n 'dict': Settings._is_dict_in_one_of_dicts\n }", "def _decompose_type(_type: Type[Any]) -> Tuple[Type[Any], bool]:\n\n if not typing_inspect.is_optional_type(_type):\n return _type, True\n\n args: Set[Type[Any]] = set(typing_inspect.get_args(_type))\n args.remove(NoneType)\n\n if len(args) != 1:\n _type.__args__ = tuple(args)\n return _type, False\n\n return args.pop(), False", "def test_type_of_attrs(self):\n self.assertEqual(type(self.review.place_id), str)\n self.assertEqual(type(self.review.user_id), str)\n self.assertEqual(type(self.review.text), str)", "def validate(self):\n for validator in self.exttype_validators:\n validator.validate(self.ext_type)", "def _determine_struct_tree_subtype(data_type, obj, strict):\n if '.tag' not in obj:\n raise bv.ValidationError(\"missing '.tag' key\")\n if not isinstance(obj['.tag'], six.string_types):\n raise bv.ValidationError('expected string, got %s' %\n bv.generic_type_name(obj['.tag']),\n parent='.tag')\n\n # Find the subtype the tags refer to\n full_tags_tuple = (obj['.tag'],)\n if full_tags_tuple in data_type.definition._tag_to_subtype_:\n subtype = data_type.definition._tag_to_subtype_[full_tags_tuple]\n if isinstance(subtype, bv.StructTree):\n raise bv.ValidationError(\"tag '%s' refers to non-leaf subtype\" %\n ('.'.join(full_tags_tuple)))\n return subtype\n else:\n if strict:\n # In strict mode, the entirety of the tag hierarchy should\n # point to a known subtype.\n raise bv.ValidationError(\"unknown subtype '%s'\" %\n '.'.join(full_tags_tuple))\n else:\n # If subtype was not found, use the base.\n if data_type.definition._is_catch_all_:\n return data_type\n else:\n raise bv.ValidationError(\n \"unknown subtype '%s' and '%s' is not a catch-all\" %\n ('.'.join(full_tags_tuple), data_type.definition.__name__))", "def test_multiple_types() -> None:\n soup = generate_case(\"multiple_types\")\n\n tests.html_schema_doc_asserts.assert_types(\n soup, [\"object\", \"string\", \"string or null\", \"integer or number\", \"integer, string, number or null\"]\n )", "def _parse_fields(self, fields):\n\n parsed_fields = set()\n\n if fields is not None and isinstance(fields, (list, tuple)):\n if len(fields) > 0 and isinstance(fields[0], (list,tuple)):\n parsed_fields.update(fields)\n else:\n parsed_fields.update([(x, None) for x in fields])\n\n # Does not support field.attname.\n field_names = set((field.name, None) for field in self.model._meta.fields if not field.primary_key)\n non_model_fields = parsed_fields.difference(field_names)\n if non_model_fields:\n raise ValueError(\"The following fields do not exist in this\"\n \" model: {0}\".format(\", \".join(x[0] for x in non_model_fields)))\n else:\n parsed_fields.update(self._find_text_fields())\n\n return parsed_fields", "def _or_types(field):\n return '|'.join(field.get('type', {}).get('names', []))", "def validate(self, value):\n if isinstance(value, dict):\n if set(value.keys()) == {\"type\", \"coordinates\"}:\n if value[\"type\"] != self._type:\n self.error(f'{self._name} 
type must be \"{self._type}\"')\n return self.validate(value[\"coordinates\"])\n else:\n self.error(\n \"%s can only accept a valid GeoJson dictionary\"\n \" or lists of (x, y)\" % self._name\n )\n return\n elif not isinstance(value, (list, tuple)):\n self.error(\"%s can only accept lists of [x, y]\" % self._name)\n return\n\n validate = getattr(self, \"_validate_%s\" % self._type.lower())\n error = validate(value)\n if error:\n self.error(error)", "def _validate_entity_type(cls, item):\n if item.entity_type not in TARGET_TYPE_TO_TARGET_MODEL:\n cls._add_error(\n 'entity %s' % base_model_validators.ERROR_CATEGORY_TYPE_CHECK,\n 'Entity id %s: Entity type %s is not allowed' % (\n item.id, item.entity_type))", "def subfields_all(verifield, required):\n for req_key, req_val in required.items():\n if getitem(verifield, req_key, '') != req_val:\n return False\n return True", "def validate(self, data):\n gender = data.get(\"gender\", None)\n size = data.get(\"size\", None)\n if gender not in ['m', 'f', 'u']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, 'f' \"\n \"for female, or 'u' for unknown.\")\n elif size not in ['s', 'm', 'l', 'xl', 'u']:\n raise serializers.ValidationError(\n \"Size must be 's' for small, 'm' for medium, 'l' for large,\"\n \" 'xl' for extra large, or 'u' for unknown.\")\n return data", "def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. 
Can\"\n \" have both using commas, ex: m,f\")\n return data", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5})", "def _normalize_type(obj, round_digits=15):\n if isinstance(obj, (bool, str)):\n return obj\n if isinstance(obj, dict):\n return tuple((_normalize_type(k), _normalize_type(v)) for k, v in obj.items())\n elif isinstance(obj, abc.Sequence) or isinstance(obj, np.ndarray) and obj.ndim == 1:\n if len(obj) == 1:\n return _normalize_type(next(iter(obj)))\n else:\n return tuple(_normalize_type(o) for o in obj)\n else:\n try:\n num = complex(obj)\n if not round_digits is False:\n num = complex(round(num.real, round_digits), round(num.imag, round_digits))\n return num\n except TypeError:\n return obj", "def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "def _is_valid_entity_type(self, entity_type):\n return entity_type in [\"artist\", \"song\", \"genre\"]", "def _invalid_fldtype(row, grid):\n field_type = grid.GetCellValue(row=row, col=1)\n if field_type.strip() == '':\n return False, ''\n if field_type not in [mg.FLDTYPE_NUMERIC_LBL, \n mg.FLDTYPE_STRING_LBL, mg.FLDTYPE_DATE_LBL]:\n msg = _('%s is not a valid field type') % field_type\n return True, msg\n return False, ''", "def abc_get_model_fields(self, record):\n if record._name == 'stock.picking':\n return [\n 'name',\n ('product_id', ['is_offer']),\n 'state',\n ('partner_id', ['display_name']),\n ]\n if record._name == 'stock.transfer_details_items':\n return [\n ('product_id', ['display_name']),\n ('product_uom_id', ['display_name', 'factor']),\n 'quantity',\n ('package_id', []),\n ('packop_id', []),\n ('result_package_id', ['display_name']),\n ('sourceloc_id', ['display_name']),\n ('destinationloc_id', ['display_name']),\n ('lot_id', ['display_name']),\n ]\n if record._name == 'product.product':\n return [\n 'display_name',\n 'default_code',\n 'ean13',\n 'is_offer',\n 'weight',\n ('uom_id', ['display_name', 'factor']),\n ]\n if record._name == 'stock.location':\n return [\n 'display_name',\n ]\n if record._name == 'product.uom':\n return [\n 'display_name',\n 'factor',\n ]\n return ['id']", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)", "def get_fields_of_type(df, data_type='StructType'):\n return [f for f in df.schema.fields if str(f.dataType).find(data_type) > -1]", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, 
float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def _regular_type(self,\n short_type: Union[str, List[Union[str, Tuple[str, str]]]]\n ) -> List[Tuple[str, str]]:\n if isinstance(short_type, str):\n return [(short_type, '')]\n elif isinstance(short_type, list):\n result = list() # type: List[Tuple[str, str]]\n for st in short_type:\n if isinstance(st, str):\n result.append((st, ''))\n else:\n result.append(st)\n return result\n raise ValueError('Invalid short type {}'.format(short_type))", "def _validate_type(data, type, err): # lint-amnesty, pylint: disable=redefined-builtin\n if not isinstance(data, type):\n raise errors.AccountDataBadType(err)", "def data_type_validator(type_name='data type'):\n\n def validator(field, presentation, context):\n field.default_validate(presentation, context)\n\n value = getattr(presentation, field.name)\n if value is not None:\n # Test for circular definitions\n container_data_type = get_container_data_type(presentation)\n if (container_data_type is not None) and (container_data_type._name == value):\n context.validation.report(\n u'type of property \"{0}\" creates a circular value hierarchy: {1}'\n .format(presentation._fullname, safe_repr(value)),\n locator=presentation._get_child_locator('type'), level=Issue.BETWEEN_TYPES)\n\n # Can be a complex data type\n if get_type_by_name(context, value, 'data_types') is not None:\n return True\n\n # Can be a primitive data type\n if get_primitive_data_type(value) is None:\n 
report_issue_for_unknown_type(context, presentation, type_name, field.name)\n\n return False\n\n return validator", "def get_fields(self):\n fields = super(RelationSerializer, self).get_fields()\n\n if self.request.method == \"GET\":\n fields['type'] = serializers.CharField(source='type.name')\n else:\n fields['type'] = serializers.PrimaryKeyRelatedField(queryset=RelationType.objects.all())\n\n return fields", "def type(self):\n return self._field.type", "def test_get_relation_type(self):\n pass", "def normalize_data(typeform_response):\n survey = json.loads(typeform_response.survey)\n response = json.loads(typeform_response.response)\n answers = {}\n response_set = response.get('answers') or []\n for answer in response_set:\n field_id = answer.get('field').get('id')\n value_key = answer.get('type')\n value = json.dumps(answer.get(value_key))\n\n field = find_field(field_id, survey)\n field_title = field.get('title') if field else '??'\n\n answers[field_id] = {\n 'field_title': field_title,\n 'answer': value,\n }\n\n if typeform_response.study_group:\n answers['study_group_id'] = {\n 'field_title': 'Learning circle ID',\n 'answer': typeform_response.study_group.id,\n }\n answers['study_group_name'] = {\n 'field_title': 'Learning circle name',\n 'answer': typeform_response.study_group.name\n }\n answers['course'] = {\n 'field_title': 'Course',\n 'answer': typeform_response.study_group.course.title,\n }\n answers['facilitator'] = {\n 'field_title': 'Facilitator',\n 'answer': typeform_response.study_group.created_by.email,\n }\n if typeform_response.study_group.team:\n answers['team'] = {\n 'field_title': 'Team',\n 'answer': typeform_response.study_group.team.name\n }\n\n return answers", "def _extract_filter_type_and_value(data):\n if data.startswith(\"in:\"):\n value = list(six.text_type(data[3:]).split(\",\"))\n filter_type = 'in'\n elif data.startswith(\"nin:\"):\n value = list(six.text_type(data[4:]).split(\",\"))\n filter_type = 'nin'\n elif data.startswith(\"neq:\"):\n value = six.text_type(data[4:])\n filter_type = 'neq'\n elif data.startswith(\"gt:\"):\n value = six.text_type(data[3:])\n filter_type = 'gt'\n elif data.startswith(\"gte:\"):\n value = six.text_type(data[4:])\n filter_type = 'gte'\n elif data.startswith(\"lt:\"):\n value = six.text_type(data[3:])\n filter_type = 'lt'\n elif data.startswith(\"lte:\"):\n value = six.text_type(data[4:])\n filter_type = 'lte'\n elif data.startswith(\"eq:\"):\n value = six.text_type(data[3:])\n filter_type = 'eq'\n elif data.startswith(\"has:\"):\n value = six.text_type(data[4:])\n filter_type = 'has'\n else:\n value = data\n filter_type = 'eq'\n\n return filter_type, value", "def validate(self):\n if self.value is None and self.allow_empty:\n return self.value\n if self.value is None:\n raise Missing(f\"{self.name} is required\")\n if not type(self.value) == self.typ:\n raise ValidationError(\n f\"{self.name} should be {self.typ}. 
\"\n f\"Found {self.value}.\"\n )\n return self.value", "def validateInputType(self, inputType):\n raise NotImplementedError()", "def isvalid(type, binding, fetch=True, **kwargs):\n kwargs = kwargs.copy()\n kwargs['auto_convert'] = False\n kwargs['validate'] = False\n\n analysis = get_validator_analysis(Validator(type, binding['format']))\n outputs = run(analysis, {'input': binding}, fetch=fetch, **kwargs)\n return outputs['output']['data']", "def validate(self, data):\n # calling subserializer validate method (fields, and presets)\n data = super(FormidableSerializer, self).validate(data)\n # we check every field define in presets are define inside the form.\n if 'fields' in data and 'presets' in data:\n data = self.check_presets_cohesion(data)\n return data", "def validate_type(type_name, value, arg_data, scopes, command):\n # Look up the type definition and perform any validation specified there\n typedef = typedef_registry.get(type_name)\n if not typedef:\n raise error.CommandDescriptionError('Unknown type: %s' % type_name)\n\n type_result = None\n validation_result = None\n\n # If it's a subtype, validate the base type first\n base_type_name = typedef.get('base-type')\n if base_type_name:\n type_result = validate_type(base_type_name, value, arg_data, scopes, command)\n\n # FIXME: Seems like we shouldn't be handling obj-type here, since\n # that's just an attribute that's specific to certain validation\n # procs. Shouldn't that value already be available in the scopes\n # to be able to pass to the proc?\n path = _lookup_in_scopes('path', scopes)\n\n # Now validate the subtype\n # _call_proc requires that the first scope be an invocation scope that\n # it possibly modifies by settings the 'scopes' variable, so we need\n # to include an empty invocation scope here\n invocation_scope = {}\n parameter_scope = {'typedef': typedef, 'value': value, 'data': arg_data}\n if path:\n parameter_scope['path'] = path\n scopes = [invocation_scope, parameter_scope, typedef]\n while base_type_name is not None:\n base_typedef = typedef_registry.get(base_type_name)\n if not base_typedef:\n raise error.CommandDescriptionError('Invalid base-type name: %s' % base_type_name)\n scopes.append(base_typedef)\n base_type_name = base_typedef.get('base-type')\n\n #command_type = command.get('command-type')\n #command_defaults = command_type_defaults.get(command_type, {})\n\n #scopes.append([command, command_defaults])\n validation = _lookup_in_scopes('validation', scopes)\n if validation:\n validation_result = _call_proc(validation, validation_registry,\n scopes, command)\n if validation_result is None:\n validation_result = value\n\n result = _combine_validation_results(type_result, validation_result,\n typedef, value)\n\n return result", "def gather_types(self):\n\n def gather_subfields(field: Field) -> List[Field]:\n fields = [field]\n\n if isinstance(field, CompositeField):\n for f in field.fields:\n fields.extend(gather_subfields(f))\n elif isinstance(field, ArrayField):\n fields = []\n fields.extend(gather_subfields(field.itemtype))\n\n return fields\n\n types = []\n for method in self.methods:\n types.extend([method.request, method.response])\n for field in method.request.fields:\n types.extend(gather_subfields(field))\n for field in method.response.fields:\n types.extend(gather_subfields(field))\n return types", "def validate_types(self):\n for req in self.requests:\n required_types = req.get_required_types()\n available_types = self.substrate.get_types()\n if not (required_types <= available_types):\n print 
required_types - available_types, ' missing'\n return False\n return True", "def _model_definition_validate(self):\n try:\n assert isinstance(self.__class__.MODEL_TYPE, str)\n assert (isinstance(self.__class__.PRIMARY_KEY, str) or\n self.__class__.PRIMARY_KEY is None)\n assert isinstance(self.__class__.PRIORITY, int)\n for key in self.__class__.MODEL:\n assert re.match(\"^\" + KEY_RE_CONSTRAINT + \"$\", key)\n assert 'name' in self.__class__.MODEL\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable\" % (\n self.__class__.MODEL_TYPE))\n\n if self.__class__.PRIMARY_KEY and self.__class__.PRIMARY_KEY != 'name':\n if self.__class__.PRIMARY_KEY not in self.__class__.MODEL:\n raise ModelInvalidException(\n \"Model %s primary key %s does not exists\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n if not self.__class__.MODEL[self.__class__.PRIMARY_KEY][2]:\n raise ModelInvalidException(\n \"Model %s primary key %s should be mandatory\" % (\n self.__class__.MODEL_TYPE,\n self.__class__.PRIMARY_KEY))\n\n for constraints in self.__class__.MODEL.values():\n if len(constraints) != 6:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(missing field)\" % (\n self.__class__.MODEL_TYPE))\n\n try:\n # Be sure default values are of the declared type\n # make some others validation on default value\n for key, constraints in self.__class__.MODEL.items():\n # Only act on non-mandatory keys as default\n # is provided. Skip 'name' checking.\n if not constraints[2] and key != 'name':\n # Validate default value type\n assert isinstance(constraints[3],\n constraints[0])\n # Validate default value match the regexp\n # if str type\n if constraints[0] is str:\n assert re.match(constraints[1],\n constraints[3])\n # Validate list default values match the regexp\n # if list type\n if isinstance(constraints[0], list):\n assert all([re.match(constraints[1], c) for\n c in constraints[3]]) is True\n except:\n raise ModelInvalidException(\n \"Model %s is invalid and not usable \"\n \"(Wrong default value according to the type \"\n \"or regex)\" % (\n self.__class__.MODEL_TYPE))\n\n # Validate the callbacks of the inherited model\n try:\n # Be sure we have only the authorized callbacks\n assert len(set(AUTHORIZED_CALLBACKS).symmetric_difference(\n set(self.__class__.CALLBACKS))) is 0\n # Be sure the callbacks are callable or NotImplemented\n for key, callback in self.__class__.CALLBACKS.items():\n if (not callable(callback)\n and callback is not NotImplementedError):\n raise Exception\n except:\n raise ModelInvalidException(\n \"Model %s callbacks are invalid, model is not usable\" % (\n self.__class__.MODEL_TYPE))", "def split_type(type):\n\n match = _type_re.match(type)\n if not match:\n raise ValueError(\"malformed type: %r\" % type)\n kind, precision = match.groups()\n itemsize = None\n if precision:\n precision = int(precision)\n itemsize, remainder = divmod(precision, 8)\n if remainder: # 0 could be a valid item size\n raise ValueError(\"precision must be a multiple of 8: %d\"\n % precision)\n return (kind, itemsize)" ]
[ "0.6012281", "0.5627871", "0.56062263", "0.5541995", "0.5448626", "0.5435107", "0.5423925", "0.54003364", "0.5392331", "0.53780186", "0.5370685", "0.52956146", "0.5276091", "0.5266467", "0.5264356", "0.52599204", "0.5195668", "0.51644295", "0.51417065", "0.5140979", "0.51346236", "0.51346153", "0.5099984", "0.5084978", "0.50753134", "0.5063584", "0.5057491", "0.5054862", "0.5039777", "0.5025895", "0.5018328", "0.5017859", "0.4996829", "0.49954408", "0.49898028", "0.4987255", "0.49828535", "0.49667954", "0.49662298", "0.49573845", "0.49561068", "0.49391794", "0.49299344", "0.49153602", "0.48932222", "0.4888386", "0.4885943", "0.4885124", "0.48810855", "0.48698124", "0.4863966", "0.4850317", "0.4838519", "0.48350978", "0.48295587", "0.48089015", "0.48083013", "0.4807084", "0.4801476", "0.47979313", "0.47740877", "0.4772299", "0.47718355", "0.4763752", "0.47627604", "0.47600523", "0.47598743", "0.4758646", "0.47565657", "0.47561154", "0.47505108", "0.47490516", "0.47466958", "0.47379732", "0.4735345", "0.4734551", "0.47334155", "0.47333264", "0.47319725", "0.4721623", "0.4720461", "0.4710297", "0.4694725", "0.46926335", "0.46899438", "0.46872324", "0.46812978", "0.46809804", "0.4673406", "0.4672516", "0.4669438", "0.46693867", "0.46687603", "0.4667492", "0.46659365", "0.466234", "0.46578884", "0.4655386", "0.46518144", "0.4650617" ]
0.6659755
0
Yields pairs from an iterable.
def pairs(iterable):
    previous = None
    for item in iterable:
        current = item
        if previous is not None:
            yield previous, current
        previous = current
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_pairs(iterable):\n if isinstance(iterable, Mapping):\n iterable = iterable.items()\n return iter(iterable)", "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def pairwise(iterable):\r\n a = iter(iterable)\r\n return izip(a, a)", "def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a)", "def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a, a)", "def pairwise(iter):\n from itertools import tee, izip\n it, it_next = tee(iter)\n next(it_next)\n for first, second in izip(it, it_next):\n yield first, second", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return izip(a, b)", "def triplewise(iterable):\n # triplewise('ABCDEFG') -> ABC BCD CDE DEF EFG\n for (a, _), (b, c) in pairwise(pairwise(iterable)):\n yield a, b, c", "def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v", "def pairwise(iterable: Iterable[Any]) -> Iterable[Any]:\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairs(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return self._iter(txn, args, lo, hi, reverse, max, include)", "def iter_over_pairs(pairs):\r\n if isinstance(pairs, dict):\r\n return pairs.iteritems()\r\n else:\r\n return pairs", "def pairwise(iterable: Iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n previous, current = None, None\n \n for current in iterable:\n if previous:\n yield previous, current\n previous = current\n if current:\n yield current, None", "def pairwise(iterable):\r\n a, b = itertools.tee(iterable)\r\n next(b, None)\r\n return itertools.izip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)", "def __iter__(self):\n return ((x, y) for y, x in self._items.items())", "def __next__(self):\n for (k, v) in pairs(self._data):\n yield (v, k)", "def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return list(zip(a, b))", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def pairwise(iterable):\n # copied from itertools docs\n from itertools import tee\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def pairwise(iterable):\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)", "def pairwise(iterable, fillvalue=None):\n a, b = it.tee(iterable)\n next(b, fillvalue)\n return it.izip(a, b)", "def interleave(iter1, iter2):\n for pairs in zip(iter1, iter2):\n yield from pairs", "def __iter__(self):\n for label, coord_seq in self.coords.items():\n for coordinate in coord_seq:\n yield (label, tuple(coordinate),)", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def pairwise(iterable):\n a, b = 
itertools.tee(iterable)\n next(b, None)\n return itertools.zip_longest(a, b)", "def split_in_pairs(arg: Iterable) -> Iterable[Tuple]:\n # We are using zip_longest with one clever hack:\n # https://docs.python.org/3/library/itertools.html#itertools.zip_longest\n # We create an iterator out of the list and then pass the same iterator to\n # the function two times. Thus the function consumes a different element\n # from the iterator each time and produces the desired result.\n iterator = iter(arg)\n return zip_longest(iterator, iterator)", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def __iter__(self):\n yield self.x\n yield self.y\n # Or, you could also do:\n # return iter([self.x, self.y])", "def __iter__(self):\n for o in self._iter:\n yield o", "def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj", "def pairs(*map_or_it, **kw):\n if map_or_it:\n l = len(map_or_it)\n if l != 1:\n raise TypeError('Pass at most 1 positional argument (got %d)' % l)\n map_or_it = map_or_it[0]\n try:\n it = iteritems(map_or_it) # mapping?\n except AttributeError: # no\n for (k, v) in map_or_it: # -> treat as sequence\n yield (k, v)\n else: # yes\n for (k, v) in it: # -> treat as mapping\n yield (k, v)\n for (k, v) in iteritems(kw):\n yield (k, v)", "def iteritems(self):\n for key in self:\n yield key, self[key]", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def __iter__(self) -> Iterable[Tuple[float, float]]:\n return iter([self.x, self.y])", "def __call__(self, iterable):\n if self._ordered:\n imap = self._distrubtor.imap\n else:\n imap = self._distrubtor.imap_unordered\n\n for result in imap(iterable):\n yield result", "def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val", "def get_iterator(self) -> Iterator[KeypointLabelPair]:\n for i in range(len(self._ids)):\n yield self[i]", "def iteritems(self):\n for aVal, bValues in self._forwardMap.iteritems():\n for bVal in bValues:\n yield aVal, bVal\n\n return", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def pairwise(iterable, include_tail=False):\n left, right = itertools.tee(iterable)\n next(right, None)\n if include_tail:\n right = itertools.chain(right, [None])\n\n return zip(left, right)", "def __iter__(self):\n for b in self.x:\n yield b", "def __iter__(self):\n for p in self.positions(): # use same order as positions()\n yield p.element() # but yield each element", "def iteritems(self):\n for key in self:\n yield (key, self[key])", "def __iter__(self):\n for key in sorted(self.keys):\n yield key, self[key]", "def iteritems(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n key = keys.next()\n yield (key, self[key])\n return make_iter()", "def __iter__(self) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError", "def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:\n\n a, b = itertools.tee(s)\n next(b, None)\n return zip(a, b)", "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", "def tripletGenerator(S):\n for a in S:\n for b in S:\n 
for c in S:\n yield (a, b, c)", "def generic_generator(*args):\n for zips in zip(*args):\n yield zips", "def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]:\n # make sure we can deal with iterables like lists too\n sourceiter = iter(iterable)\n # call islice until it returns an empty tuple\n return iter(lambda: tuple(islice(sourceiter, size)), ())", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", "def __iter__(self):\r\n for attr, value in self.__dict__.items():\r\n a = getattr(self, attr)\r\n if type(a) is list:\r\n if len(a) > 0:\r\n yield attr, a", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def __iter__(self):\n yield self._x\n yield self._y", "def __iter__(self):\n for point in self.points:\n yield point", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def pairs(self):\n return self.items() if self.is_a(dict) else self.chunks(2)", "def __iter__(self):\n if self._len_keys == 1:\n yield from self._dict.keys()\n else:\n for key in self._dict.keys():\n yield tuple(sorted(list(key)))", "def itervalues(self):\n def make_iter(self=self):\n keys = self.iterkeys()\n while True:\n yield self[keys.next()]\n return make_iter()", "def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def __iter__(self):\n for v in self._items:\n yield v", "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "def __iter__(self):\n for i in range(self.m):\n for j in range(self.n):\n yield self[i, j]", "def __iter__(self):\n for x in self.innings:\n yield x", "def __iter__(self):\n for p in self.positions():\n yield p.element()", "def pairs(self):\n return self.__pairs", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def iter(self):\n\t\tfor element in self.elements:\n\t\t\tyield element", "def with_iter(contextmanager):\n with contextmanager as iterable:\n for item in iterable:\n yield item", "def itervalues(self, multi=False):\n for k, v in self.iteritems(multi=multi):\n yield v", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def items(self) -> Iterator[Tuple[str, CompletionElement]]:\n for e in self._elements:\n yield e.value, e", "def pairwise(iterable: Iterable,\n tuple_size: int):\n return zip_longest(*(islice(it, pos, None) for pos, it in enumerate(tee(iterable, tuple_size))))", "def classIterator(classIter):\n for attribute, value in classIter.__dict__.iteritems():\n yield attribute, value", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def coordinate_iterator(coords, atoms_per_res):\n assert len(coords) % atoms_per_res == 0, 
f\"There must be {atoms_per_res}\" \\\n f\" atoms for every residue.\\n\" \\\n f\"len(coords) = {len(coords)}\"\n i = 0\n while i + atoms_per_res <= len(coords):\n yield coords[i:i + atoms_per_res]\n i += atoms_per_res", "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def __iter__(self):\n for y in range(self.origin.y, self.origin.y + self.size.y):\n for x in range(self.origin.x, self.origin.x + self.size.x):\n yield Vec2(x, y)", "def unzip_finite(\n iterable: Iterable[Tuple[Any, ...]],\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield zipped" ]
[ "0.81223637", "0.7461575", "0.7307938", "0.7271222", "0.714917", "0.7142806", "0.7133279", "0.7094758", "0.70062834", "0.6977689", "0.6964083", "0.6942506", "0.6933127", "0.6882018", "0.6859719", "0.68461996", "0.6843346", "0.6843346", "0.6843346", "0.6824935", "0.6753879", "0.6752588", "0.6743065", "0.6743065", "0.6743065", "0.6743065", "0.6736476", "0.67261297", "0.6706455", "0.6706455", "0.6678854", "0.6673888", "0.66557455", "0.66079545", "0.6580737", "0.6570719", "0.65658927", "0.64852136", "0.64819205", "0.64786667", "0.64729214", "0.64608544", "0.64469814", "0.64449066", "0.64432424", "0.64363825", "0.6426845", "0.6404562", "0.6371405", "0.6353855", "0.6353855", "0.63500935", "0.6325435", "0.6319975", "0.6304714", "0.6287775", "0.627276", "0.6259393", "0.62555987", "0.62339425", "0.62279826", "0.6216114", "0.62015563", "0.6200631", "0.61937577", "0.61937577", "0.6180671", "0.6173422", "0.6173422", "0.6166695", "0.61626077", "0.6156485", "0.61540717", "0.61298627", "0.6125899", "0.6124058", "0.6118726", "0.6116666", "0.6112269", "0.60981405", "0.60979664", "0.60815895", "0.6076743", "0.60766035", "0.60698515", "0.60698515", "0.60665214", "0.60656565", "0.60346097", "0.60335195", "0.60335195", "0.60335165", "0.6031986", "0.6030019", "0.60288036", "0.6027784", "0.6025202", "0.602367", "0.601606", "0.6001455" ]
0.77604663
1
Used to visualize the follow point on purepursuit
def viewFollowPoint(self,follow_point_msg):
    marker = Marker()
    marker.header.frame_id = self.veh_name
    marker.ns = self.veh_name + "/follow_point"
    marker.id = 0
    marker.action = Marker.ADD
    marker.type = Marker.SPHERE
    marker.lifetime = rospy.Duration.from_sec(5.0)
    marker.pose.position.z = 0
    marker.pose.position.x = follow_point_msg.x
    marker.pose.position.y = follow_point_msg.y
    marker.color.a = 1.0
    marker.scale.x = 0.1
    marker.scale.y = 0.1
    marker.scale.z = 0.1
    marker.color.r = 0
    marker.color.g = 1
    marker.color.b = 0
    self.pub_follow_point.publish(marker)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def head_hearteyes():\n print (hair_longer())\n print (eye_heart())\n print (nose_rightwards())\n print (mouth_smile())\n print (chin_curvy())", "def bonus_food(self):\n self.penup()\n self.shape(\"turtle\")\n self.color(\"red\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Bonus Food {self.x_cordinates} and {self.y_cordinates}\")", "def house ():\n\n poly (3,300,\"red\")\n penup()\n setposition(0,-300)\n pendown()\n poly (4,300,\"brown\")\n penup()\n setposition(100,-300)\n pendown()\n poly(4,100,\"green\") \n\n return None", "def agent_portrayal(agent):\n portrayal = {\"Shape\": \"circle\",\n \"Filled\": \"true\"\n }\n\n if agent.wealth > 0:\n portrayal[\"Color\"] = \"red\"\n portrayal[\"Layer\"] = 0\n portrayal[\"r\"] = 0.5\n else:\n portrayal[\"Color\"] = \"grey\"\n portrayal[\"Layer\"] = 1\n portrayal[\"r\"] = 0.2\n\n return portrayal", "def head_plain():\n print (hair_buzz())\n print (eye_narrow())\n print (nose_triangle())\n print (mouth_smile())\n print (chin_plain())", "def principle(self):\n self.main_window.message(\n bg=\"navy\", fg=\"ivory\", width=400, font=\"Helvetica 10 bold\",\n text=\"The pieces in this game each have one white and one black\"\n \" side. When you click on a piece, all 8 adjacent pieces turn\"\n \" over.\\nThe game consists of trying to turn them all over.\\n\"\n \"\\nIf the exercise is very easy with a 2 x 2 grid, it becomes\"\n \" more difficult with larger grids. It is even impossible with\"\n \" some grids.\\nIt's up to you to find out which ones!\\n\"\n \" Reference: 'Pour la Science' magazine\")", "def seven_punishment(self):\n if self.current_draw_punishment == 1:\n self.current_draw_punishment = 2\n else:\n self.current_draw_punishment = self.current_draw_punishment + 2", "def outro():\n print('Tento remake vytvoril mirek sko súčasť svojich školení v rokoch 2022-2023.')\n print('Originálnu hru vytvoril v roku 1986 František Fuka aka Fuxoft.')\n print('See you soon.')", "def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())", "def draw(self):\r\n #if the UFO has only 1 life left, turn it red\r\n if(self.life <= 1):\r\n TARGET_UFO_COLOR = arcade.color.RED\r\n #If UFO has more than 1 life left, keep it silver\r\n else:\r\n TARGET_UFO_COLOR = arcade.color.SILVER\r\n arcade.draw_circle_outline(self.center.x, self.center.y, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 3)\r\n arcade.draw_ellipse_filled(self.center.x, self.center.y, TARGET_UFO_WIDTH, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 15)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw(c):\n c.draw_line((0,130), (580,130), 200, mood)\n c.draw_line((0,450), (290,450), 200, p1mood)\n c.draw_line((290,450), (580,450), 200, p2mood)\n c.draw_line((0,290), (580,290), 200, \"black\")\n c.draw_text(format(current), (150, 330), 110, \"yellow\")\n \n c.draw_line((193,108), (387,108), 120, 
\"#000080\")\n c.draw_line((0,25), (580,25), 50, \"#00FFFF\")\n c.draw_text(\"SINGLE PLAYER\", (20, 34), 30, \"#191970\")\n c.draw_text(\"Score\", (250, 90), 30, \"white\", \"sans-serif\")\n c.draw_line((250,96), (329,96), 4, \"white\")\n c.draw_text(score(wins, tries), scorepos, 40, \"white\", \"sans-serif\")\n \n c.draw_line((66,472), (220,472), 120, \"#556B2F\")\n c.draw_line((360,472), (514,472), 120, \"#4B0082\") \n c.draw_line((0,555), (580,555), 50, \"#F4A460\")\n c.draw_text(\"TWO PLAYER\", (20, 566), 30, \"#800000\")\n c.draw_text(\"Player 1\", (90, 454), 30, \"#F0E68C\", \"sans-serif\")\n c.draw_line((90,464), (200,464), 4, \"#F0E68C\")\n c.draw_text(score_string(p1score), p1scorepos, 40, \"#F0E68C\", \"sans-serif\")\n c.draw_text(\"Player 2\", (380,454), 30, \"#E6E6FA\", \"sans-serif\")\n c.draw_line((380,464), (490,464), 4, \"#E6E6FA\")\n c.draw_text(score_string(p2score), p2scorepos, 40, \"#E6E6FA\", \"sans-serif\")\n c.draw_line((0,440), (580,440), result2pline, \"#F7DE00\")\n c.draw_text(result2p, (180,450), 35, \"black\")", "def IntroduceNao():\n\n\t# First, wake up\n\t#motionProxy.wakeUp()\n\tpostureProxy.goToPosture(\"Crouch\", 0.5)\n\tturn_on_eye()\n\tmotionProxy.setBreathEnabled('Body', False)\n\tmotionProxy.setBreathEnabled('Arms', True)\n\t#motionProxy.setBreathEnabled('Head', True)\n\t#motionProxy.rest()\n\n\n\t'''if msg.data in story_dict:\n\t\tstoryNum = story_dict[msg.data]\n\t\tprint storyNum'''\n\n\t'''if msg.data == '[0, 1]' or msg.data == '[1, 0]':\n\t\tif pairs_dict['[0, 1]'] == False:\n\t\t\tprint \"test [0, 1]\"\n\t\t\tpairs_dict['[0, 1]'] = True'''\n\tstory.setLanguage('English')\n\t#story.say(\"\\\\rspd=90\\\\ Hello \\\\pau=500\\\\ My name is nao \\\\pau=500\\\\ I really like reading short stories\")\n\t#story.say(\"\\\\rspd=90\\\\ Do you want to listen to them?\")\n\t#story.say(\"\\\\rspd=90\\\\ sometimes I make mistakes, can you help me to correct them?\")\n\ttime.sleep(1)\n\t#story.say(\"\\\\rspd=90\\\\ If you want to read with me, please bring the book\")\n\tstory.say(\"\\\\rspd=90\\\\ Hello\")\n\tpitch_angle = 0.1\n\tLookAtTheBook(pitch_angle)\n\ttime.sleep(2)", "def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! 
\\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()", "def startPresentation(ntrials):\n\n for trialIdx in range(ntrials):\n\n # draw the fixcross followed by the star\n drawFixCross(1.0)\n drawStar(4.0)", "def display(self):\n for i in range(0, len(self.__drawn)):\n if self.__drawn[i]:\n print(str(i+1) + \". You drew a short straw!\")\n else:\n print(str(i+1) + \". You're okay.\")", "def print_prediction (self, ptup):\n from . import ellipses\n bestra, bestdec, maj, min, pa = ptup\n\n f = ellipses.sigmascale (1)\n maj *= R2A\n min *= R2A\n pa *= R2D\n\n print ('position =', fmtradec (bestra, bestdec))\n print ('err(1σ) = %.2f\" × %.2f\" @ %.0f°' % (maj * f, min * f, pa))", "def Peacekeaper(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def affichage(self):\r\n print(self.ship)\r\n for a in range(self.dim+2):\r\n for b in range(self.dim+2):\r\n print(self.plat[b][a],end=\" \")\r\n print(\"\")", "def draw(self, view):\n for i in self.get_aliens():\n for n in i:\n if not n is None:\n n.draw(view)\n self.get_ship().draw(view)\n self.get_ship().get_linevalue().draw(view)\n for i in self.get_PU():\n i.draw(view)\n for i in self.get_bolts():\n i.draw(view)", "def render_shore_noise(self, points):\n point_list = [(x + 50, -y + 800) for x, y in points] # Up is -ve\n pygame.draw.line(self.surface, CYAN, (50, 800), (410, 800), 1) # x-axis\n pygame.draw.line(self.surface, CYAN, (50, 800), (50, 700), 1) # y-axis\n\n for x, y in point_list: # points\n self.surface.set_at((int(x), int(y)), RED)", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def player_tie(self):\r\n\r\n self.summary = (\" \"* 78) + \"TIE. TRY AGAIN\"\r\n print(\"Match ends in a draw.\\n\")", "def display_player_points():\r\n pass", "def draw_pose(preds, img):\n humans = preds['predictions']\n for human in humans:\n pose_lines = human['pose_lines']\n for i, _ in enumerate(pose_lines):\n line = pose_lines[i]['line']\n cv2.line(img, (line[0], line[1]), (line[2], line[3]), COCO_COLORS[i], 3)", "def draw(img, p):\n\t\t\n\t########### First of all we gotta define which point connects together, there are points for the head(5), the shoulders, elbows, hands, top of thighs, knees, feet\n\t########### top of thighs, knees and feet. 
We also gonna add 2 extra points for the neck and the pelvis, to make everythonh look better.\n\t########### We also provide the color of each line here\n\tconnexions = [\t\n\t\t\t\t\t(5, 7, 'navy'),\t\t# shoulder => elbow\n\t\t\t\t\t(7, 9, 'navy'),\t\t# elbow => hand\n\t\t\t\t\t(6, 8, 'navy'),\t\t# same on the other side\n\t\t\t\t\t(8, 10, 'navy'),\n\t\t\t\t\t(11, 13, 'lime'),\t# thigh => knee\n\t\t\t\t\t(13, 15, 'lime'),\t# knee => foot\n\t\t\t\t\t(12, 14, 'lime'),\t# same on the other side\n\t\t\t\t\t(14, 16, 'lime'),\n\n\t\t\t\t\t###### With The Extra points :\n\n\t\t\t\t\t(0, 17, 'aqua'),\t# head => neck\n\t\t\t\t\t(17, 5, 'aqua'),\t# neck => shoulders\n\t\t\t\t\t(17, 6, 'aqua'),\n\t\t\t\t\t(17, 18, 'teal'),\t# neck => pelvis\n\t\t\t\t\t(18, 11, 'teal'),\t# pelvis => thighs\n\t\t\t\t\t(18, 12, 'teal')\n\t\t\t\t\t]\n\n\t###### now let's find out how many objects were detected \n\t\n\tl = len(p[0][\"scores\"])\n\n\t##### time to draw now, we'll only select objects with a score over .9\n\n\td = idw.Draw(img)\n\n\tfor k in range(l):\n\n\t\tif p[0][\"scores\"][k] > 0.98:\n\n\t\t\t##### Let's add the neck and pelvis:\n\t\t\tneck = (p[0][\"keypoints\"][k][5] + p[0][\"keypoints\"][k][6])/2\n\t\t\tpelv = (p[0][\"keypoints\"][k][11] + p[0][\"keypoints\"][k][12])/2\n\n\t\t\t#### it's getting tricky here\n\n\t\t\tnepe = t.zeros((2, 3))\n\t\t\tnepe[0] = neck ; nepe[1] = pelv \n\n\t\t\t### Now let's put everything into a single tensor\n\t\t\tbody = t.cat((p[0][\"keypoints\"][k], nepe))\n\n\t\t\t#### We can start drawing now, for real\n\n\t\t\tfor tp in connexions:\n\n\t\t\t\tp0 = (int(body[tp[0], 0]), int(body[tp[0], 1]))\n\t\t\t\tp1 = (int(body[tp[1], 0]), int(body[tp[1], 1]))\n\t\t\t\td.line([p0, p1], fill=tp[2], width=2)\n\n\t\t\t#### Now the points\n\n\t\t\tfor ts in t.cat((body[0:1], body[5:])):\n\t\t\t\td.ellipse((int(ts[0]-2), int(ts[1]-2), int(ts[0]+2), int(ts[1]+2)), 'fuchsia')\n\n\t### and finally\n\t#plt.imshow(np.asarray(img)) Not Like That\n\timg.show()", "def test_visualize_recipe_nutrition(self):\n pass", "def pure_pursuit(self, waypoint: numpy.ndarray):\n Kg, Kc = 0.3, 4\n rear_pos = np.array([self.x, self.y]) - (self.LENGTH / 4) * np.array([np.cos(self.h), np.sin(self.h)])\n LOOK_AHEAD = Kg * self.v + Kc\n\n target_x = self.x + LOOK_AHEAD * np.cos(self.h)\n target_y = self.y + LOOK_AHEAD * np.sin(self.h)\n\n target_pos, _ = get_waypoint_pos([target_x, target_y], waypoint)\n\n alpha = np.arctan2(target_pos[1] - rear_pos[1], target_pos[0] - rear_pos[0]) - self.h\n\n l_dist = np.hypot(target_pos[1] - rear_pos[1], target_pos[0] - rear_pos[0])\n\n delta_steer = np.arctan2(self.LENGTH * np.sin(alpha), l_dist)\n\n return delta_steer", "def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))", "def draw_point(turt, pos, count):\r\n turt.goto(pos)\r\n turt.color(\"lawngreen\")\r\n turt.dot(8)\r\n turt.pu()\r\n turt.forward(5)\r\n turt.color(\"HotPink1\")\r\n turt.write(count, True, align=\"left\")\r\n turt.hideturtle()", "def drawDescription(self):\n print(\"\\nPress the following keys to run the features of the GoPiGo3.\")\n print(\"To move the motors, make sure you have a fresh set of batteries powering the GoPiGo3.\\n\")", "def turn(self):\n\n # Let \n # I = loan interest rate\n # E = awarded money as a result of certain accounts randomly quadrupling\n # A = original assets under management\n #\n # Then profit = A * I - E\n self.profits.append(self.assets_under_management * 
LOAN_INTEREST - \\\n self._award_accounts())\n self.assets_under_management = np.sum(self.accounts)", "def display():\r\n fill(un)\r\n ellipse(x,y,2*rayonBalle,2*rayonBalle)\r\n \r\n fill(deux)\r\n ellipse(xDeux,yDeux,2*rayonBalle,2*rayonBalle) \r\n \r\n fill(trois)\r\n ellipse(xTrois,yTrois,2*rayonBalle,2*rayonBalle)", "def handDecision(handIn):", "def clue(self):\n if self.item == \"receipt\":\n print(\"The receipt reads that Jay bought 'diltiazem' medication 4 days ago.\")\n print(\"Diltiazem: medication for high blood pressure, when \"\n \"consumed by an individual in large quantities without high blood\"\n \"pressure, can cause heart failure.\")\n else:\n print(\"That is the wrong item!\")", "def compute_follow(self):\n compute_follow_sets(self)", "def plot_positives(positives):\n plt.scatter(positives[:,0], positives[:,1], label='Goal examples', marker='*', color='g', s=200)", "def reckon(self):", "def show_tract(segmented_tract, color_positive ,segmented_tract_negative, color_negative, out_path): \r\n affine=utils.affine_for_trackvis(voxel_size=np.array([1.25,1.25,1.25]))\r\n bundle_native = transform_streamlines(segmented_tract, np.linalg.inv(affine))\r\n \r\n bundle_nativeNeg = transform_streamlines(segmented_tract_negative, np.linalg.inv(affine))\r\n\r\n renderer = window.Renderer()\r\n stream_actor2 = actor.line(bundle_native,\r\n colors=color_positive, linewidth=0.1)\r\n \r\n stream_actorNeg = actor.line(bundle_nativeNeg, colors=color_negative,\r\n opacity=0.01, linewidth=0.1)\r\n renderer.set_camera(position=(408.85, -26.23, 92.12),\r\n focal_point=(0.42, -14.03, 0.82),\r\n view_up=(-0.09, 0.85, 0.51))\r\n \r\n bar = actor.scalar_bar()\r\n renderer.add(stream_actor2)\r\n \r\n renderer.add(stream_actorNeg)\r\n renderer.add(bar)\r\n window.show(renderer, size=(1920, 1039), reset_camera=False)\r\n renderer.camera_info()\r\n \r\n \"\"\"Take a snapshot of the window and save it\r\n \"\"\"\r\n window.record(renderer, out_path = out_path, size=(1920, 1039))", "def ProbCorrectTable():\n efficacies = [3, 1.5, 0, -1.5, -3]\n difficulties = [-1.85, -0.05, 1.75]\n\n for eff in efficacies:\n print('%0.2f & ' % eff, end=' ') \n for diff in difficulties:\n p = ProbCorrect(eff, diff)\n print('%0.2f & ' % p, end=' ') \n print(r'\\\\')", "def _undogoto(self, entry):\n old, new, go_modes, coodata = entry\n drawing, pc, ps, filling = go_modes\n cLI, cL, pl, items = coodata\n screen = self.screen\n if abs(self._position - new) > 0.5:\n print (\"undogoto: HALLO-DA-STIMMT-WAS-NICHT!\")\n # restore former situation\n self.currentLineItem = cLI\n self.currentLine = cL\n\n if pl == [(0, 0), (0, 0)]:\n usepc = \"\"\n else:\n usepc = pc\n screen._drawline(cLI, pl, fill=usepc, width=ps)\n\n todelete = [i for i in self.items if (i not in items) and\n (screen._type(i) == \"line\")]\n for i in todelete:\n screen._delete(i)\n self.items.remove(i)\n\n start = old\n if self._speed and screen._tracing == 1:\n diff = old - new\n diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2\n nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))\n delta = diff * (1.0/nhops)\n for n in range(1, nhops):\n if n == 1:\n top = True\n else:\n top = False\n self._position = new + delta * n\n if drawing:\n screen._drawline(self.drawingLineItem,\n (start, self._position),\n pc, ps, top)\n self._update()\n if drawing:\n screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),\n fill=\"\", width=ps)\n # Myturtle now at position old,\n self._position = old\n ## if undo is done during creating a polygon, the last 
vertex\n ## will be deleted. if the polygon is entirely deleted,\n ## creatingPoly will be set to False.\n ## Polygons created before the last one will not be affected by undo()\n if self._creatingPoly:\n if len(self._poly) > 0:\n self._poly.pop()\n if self._poly == []:\n self._creatingPoly = False\n self._poly = None\n if filling:\n if self._fillpath == []:\n self._fillpath = None\n print(\"Unwahrscheinlich in _undogoto!\")\n elif self._fillpath is not None:\n self._fillpath.pop()\n self._update() #count=True)", "def clue(self):\n if self.item == \"notebook\":\n print(\"Megan wrote about family members.\")\n print(\"Lilian: old, but healty, had 3 siblings, parents of Jay, Megan, Abriella, rich\")\n print(\"Jay: in his 30s, has a high blood pressure\")\n print(\"Megan: in her 30s, no health conditions\")\n print(\"Abriella: Youngest of the cousins, in her 20s, has Asthma\")\n else:\n print(\"That is the wrong item!\")", "def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()", "async def pointstop(self, ctx):\n inventories = get_file(\"inventories\")\n players_points = {}\n\n for player in inventories.items():\n player_points = 0\n for item in player[1][\"items\"]:\n player_points += item[\"points\"]\n players_points[player[0]] = player_points\n\n ptop = sorted(players_points.items(), key=lambda x: x[1], reverse=True)\n\n player_field = \"\"\n points_field = \"\"\n rank = 1\n\n for player in ptop:\n player_field += f\"`#{rank}` <@{player[0]}>\\n\"\n points_field += f\"`{player[1]}`\\n\"\n rank += 1\n\n embed = discord.Embed(color=default_color)\n embed.set_author(name=\"🏆 Classement des points\")\n embed.add_field(name=\"[#] Joueur\", value=player_field)\n embed.add_field(name=\"Points\", value=points_field)\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "def polago(x , y, size, n, clr):\n # turtle setting\n turtle.screensize(1000)\n turtle.speed(30)\n turtle.setheading(0)\n turtle.color(clr)\n turtle.fillcolor(clr) \n turtle.goto(x, y)\n # draw random polagon \n while n > 1:\n # make random polagon\n turtle.pendown()\n turtle.begin_fill()\n # random size\n s = random.randint(10, size)\n a = random.randint(3, 8)\n for i in range (a):\n turtle.forward(s)\n turtle.left(360 / a) \n turtle.end_fill()\n n -= 1\n turtle.penup()\n turtle.goto(random.uniform(-300, 300), random.uniform(-300, 300))\n\n turtle.done", "def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()", "def display_current_situation(self, magic):\n usable_points = 0\n active_powers = []\n header_print(\"Your side of the struggle:\")\n for card in self.stats['active']:\n print(card_format(card))\n for power in card['powers']:\n active_powers.append(power)\n for power in card['powers']:\n if power in self.stats['opponent']['powers']:\n usable_points += card['points']\n break\n print(\n \"\\nRelevant strength: %d Magic: %d Relevant powers: %s\" % (\n usable_points,\n magic,\n ', '.join(\n set(\n self.stats['opponent']['powers']\n ).intersection(active_powers)\n )\n )\n )\n header_print(\"The other side of the struggle:\")\n print(card_format(self.stats['opponent']))\n return usable_points", "def bprint(self):\n\t\tpcolor = 
[\n\t\t\t(0, 0, 255, 255),\n\t\t\t(255, 0, 0, 255),\n\t\t\t(0, 255, 0, 255),\n\t\t\t(255, 255, 0, 255),\n\t\t\t(0, 255, 255, 255),\n\t\t\t(255, 140, 0, 255),\n\t\t\t(140, 0, 255, 255),\n\t\t\t(255, 0, 255, 255)\n\t\t]\n\t\timg = Image.open(bundled_data_path(self.cog) / 'img.png')\n\t\td = ImageDraw.Draw(img)\n\t\t#OWNEDBY\n\t\tfor t in range(40):\n\t\t\tif self.ownedby[t] > -1:\n\t\t\t\tif 0 < t < 10:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-39,702,(650-(t*50))-10,735],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-37,702,(650-(t*50))-12,733],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 10 < t < 20:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[16,(650-((t-10)*50))-39,50,(650-((t-10)*50))-10],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[18,(650-((t-10)*50))-37,50,(650-((t-10)*50))-12],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 20 < t < 30:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+11,16,(100+((t-20)*50))+41,50],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+13,18,(100+((t-20)*50))+39,50],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 30 < t < 40:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[702,(100+((t-30)*50))+11,736,(100+((t-30)*50))+41],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[702,(100+((t-30)*50))+13,734,(100+((t-30)*50))+39],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t#TILE\n\t\t#Because the player int used to be 1 indexed, the players would be in the wrong\n\t\t#position without 1 indexing and subtracting 1 from t when calling self.tile[t]\n\t\t#and pcolor[t]. I could fix this by changing the hard coded values, but this is\n\t\t#easier in the short term.\n\t\tfor t in range(1, self.num + 1):\n\t\t\tif not self.isalive[t-1]:\n\t\t\t\tcontinue\n\t\t\tif self.tile[t-1] == 0:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[(12*(t-1))+604,636,(12*(t-1))+614,646], fill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[(12*(t-1))+605,637,(12*(t-1))+613,645], fill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 0 < self.tile[t-1] < 10:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-47)+(12*(t-1)),636,((650-(self.tile[t-1]*50))-37)+(12*(t-1)),646],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-46)+(12*(t-1)),637,((650-(self.tile[t-1]*50))-38)+(12*(t-1)),645],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-47)+(12*(t-5)),648,((650-(self.tile[t-1]*50))-37)+(12*(t-5)),658],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-46)+(12*(t-5)),649,((650-(self.tile[t-1]*50))-38)+(12*(t-5)),657],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 10:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[106,(12*(t-1))+604,116,(12*(t-1))+614],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[107,(12*(t-1))+605,115,(12*(t-1))+613],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 10 < self.tile[t-1] < 20:\n\t\t\t\tif t < 
5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[106,((650-((self.tile[t-1]-10)*50))-47)+(12*(t-1)),116,((650-((self.tile[t-1]-10)*50))-37)+(12*(t-1))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[107,((650-((self.tile[t-1]-10)*50))-46)+(12*(t-1)),115,((650-((self.tile[t-1]-10)*50))-38)+(12*(t-1))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[94,((650-((self.tile[t-1]-10)*50))-47)+(12*(t-5)),104,((650-((self.tile[t-1]-10)*50))-37)+(12*(t-5))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[95,((650-((self.tile[t-1]-10)*50))-46)+(12*(t-5)),103,((650-((self.tile[t-1]-10)*50))-38)+(12*(t-5))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 20:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[138-(12*(t-1)),106,148-(12*(t-1)),116],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[139-(12*(t-1)),107,147-(12*(t-1)),115],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 20 < self.tile[t-1] < 30:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+39)-(12*(t-1)),106,((100+((self.tile[t-1]-20)*50))+49)-(12*(t-1)),116],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+40)-(12*(t-1)),107,((100+((self.tile[t-1]-20)*50))+48)-(12*(t-1)),115],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+39)-(12*(t-5)),94,((100+((self.tile[t-1]-20)*50))+49)-(12*(t-5)),104],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+40)-(12*(t-5)),95,((100+((self.tile[t-1]-20)*50))+48)-(12*(t-5)),103],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 30:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[636,138-(12*(t-1)),646,148-(12*(t-1))],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[637,139-(12*(t-1)),645,147-(12*(t-1))],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 30 < self.tile[t-1] < 40:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[636,((100+((self.tile[t-1]-30)*50))+39)-(12*(t-1)),646,((100+((self.tile[t-1]-30)*50))+49)-(12*(t-1))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[637,((100+((self.tile[t-1]-30)*50))+40)-(12*(t-1)),645,((100+((self.tile[t-1]-30)*50))+48)-(12*(t-1))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[648,((100+((self.tile[t-1]-30)*50))+39)-(12*(t-5)),658,((100+((self.tile[t-1]-30)*50))+49)-(12*(t-5))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[649,((100+((self.tile[t-1]-30)*50))+40)-(12*(t-5)),657,((100+((self.tile[t-1]-30)*50))+48)-(12*(t-5))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t#NUMHOUSE\n\t\tfor t in range(40):\n\t\t\tif self.numhouse[t] == 5:\n\t\t\t\tif 0 < t < 10:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-33,606,(650-(t*50))-15,614],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-32,607,(650-(t*50))-16,613],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 10 < t < 
20:\t\t\t\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[138,(650-((t-10)*50))-33,146,(650-((t-10)*50))-17],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[139,(650-((t-10)*50))-32,145,(650-((t-10)*50))-18],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 20 < t < 30:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+17,138,(100+((t-20)*50))+35,146],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+18,139,(100+((t-20)*50))+34,145],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 30 < t < 40:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[606,(100+((t-30)*50))+17,614,(100+((t-30)*50))+35],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[607,(100+((t-30)*50))+18,613,(100+((t-30)*50))+34],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\telif self.numhouse[t] > 0:\n\t\t\t\tfor tt in range(self.numhouse[t]):\n\t\t\t\t\tif 0 < t < 10:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((650-(t*50))-47)+(tt*12),606,((650-(t*50))-37)+(tt*12),614],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((650-(t*50))-46)+(tt*12),607,((650-(t*50))-38)+(tt*12),613],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 10 < t < 20:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[138,((650-((t-10)*50))-47)+(tt*12),146,((650-((t-10)*50))-37)+(tt*12)],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[139,((650-((t-10)*50))-46)+(tt*12),145,((650-((t-10)*50))-38)+(tt*12)],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 20 < t < 30:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((100+((t-20)*50))+39)-(tt*12),138,((100+((t-20)*50))+49)-(tt*12),146],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((100+((t-20)*50))+40)-(tt*12),139,((100+((t-20)*50))+48)-(tt*12),145],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 30 < t < 40:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[606,((100+((t-30)*50))+39)-(tt*12),614,((100+((t-30)*50))+49)-(tt*12)],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[607,((100+((t-30)*50))+40)-(tt*12),613,((100+((t-30)*50))+48)-(tt*12)],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t#END\n\t\ttemp = BytesIO()\n\t\ttemp.name = 'board.png'\n\t\timg.save(temp)\n\t\ttemp.seek(0)\n\t\treturn temp", "def loan(self):", "def display_pokemon(pokemon_party):\n \n for count, i in enumerate(pokemon_party, start=0):\n print(f\"{count+1}){pokemon_party[count]}\")", "def rop():\n return", "def healPokemon(user):\n\n display_pokemon(user.pokemon_party)\n\n #Ask which pokemon needs healing?\n pokemon_to_heal = user_selection(len(user.pokemon_party),f\"Which Pokemon do you want to heal?(1-{len(user.pokemon_party)}):\")\n\n #Ask which item to use\n db.execute(\"SELECT * FROM hp_restoring_items\")\n medicine = db.fetchall()\n\n for count, item in enumerate(medicine):\n print(f\"{count+1}) {item[1]}\")\n \n medicine_to_use = user_selection(len(medicine), f\"Which Healing Item do youy want to use?(1-{len(medicine)})\")\n print(medicine[medicine_to_use-1][1])\n print(user.heal_pokemon(pokemon_to_heal-1, medicine[medicine_to_use-1][1], medicine[medicine_to_use-1][3]))", "def show_prop(self):\n print(self.population_size)\n print(self.max_generation)\n print(self.mutate_rate)\n print(self.elite_rate)\n print(self.cross_rate)\n print(self.cross_type)\n print(self.verify_num)\n print(self.proof)", 
"def draw(self):", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def draw_pavement():\n\n roberto.penup()\n roberto.goto(-345, -100)\n roberto.pendown()\n roberto.begin_fill()\n for i in range(4): # this loop draws a big black rectangle that is positioned at the bottom part of the screen\n roberto.forward(684)\n roberto.right(90)\n roberto.end_fill()", "def echo(self, foetus):\n Allele_semblable = 0\n for Allele in range(3):\n if self.allele[Allele] in foetus.allele and self.allele[Allele] != 0.0:\n Allele_semblable = Allele\n if Allele_semblable == 0:\n Allele_Echo = self.allele[Allele_semblable + 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3\n elif Allele_semblable == 1:\n Allele_Echo = self.allele[Allele_semblable - 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3", "def display_praises(self):\n player = self.caller\n praises_or_condemns = player.get_current_praises_and_condemns()\n praises = praises_or_condemns.filter(value__gte=0)\n condemns = praises_or_condemns.filter(value__lt=0)\n msg = \"Praises:\\n\"\n table = EvTable(\"Name\", \"Praises\", \"Value\", \"Message\", width=78, align=\"r\")\n for praise in praises:\n table.add_row(\n praise.target,\n praise.number_used,\n \"{:,}\".format(praise.value),\n praise.message,\n )\n msg += str(table)\n msg += \"\\nCondemns:\\n\"\n table = EvTable(\"Name\", \"Condemns\", \"Value\", \"Message\", width=78)\n for pc in condemns:\n table.add_row(pc.capitalize(), condemns[pc][0], condemns[pc][1])\n msg += str(table)\n msg += \"\\nPraises or Condemns remaining: %s\" % self.get_actions_remaining()\n return msg", "def es_satisfecho_por(self, candidata):", "def _draw_apple(self):\n if self._apple is not None:\n self._sensehat.set_pixel(self._apple.position.x, self._apple.position.y, self._apple.color)", "def action_house_reveal(self) -> None:\n self.house.hand.reveal_hand()\n print(f\"\\nThe house reveals their hand containing: {self.house.hand}, totalling to {self.house.hand.value}\")", "def imprimir(self):\n print(\"\\n\".join(self.progeso))", "def sit(self):\n\n\t\tself.pose.goToPosture(\"Crouch\")", "def show_reward(self, reward_):\n reward = reward_.reshape(self.cols, self.rows).T\n self.show_heatmap(reward, \"Pseudo Reward\")", "def talk_with_grandma(self):\n item_count = len(self.inventory())\n if item_count == 1:\n self.dialogue.run(self.inventory()[0].description.upper())\n elif item_count == 2:\n self.dialogue.run(\"TWO\")\n elif item_count == 3:\n self.dialogue.run(\"THREE\")\n elif item_count == 4:\n self.dialogue.run(\"SOUP\")\n self.open_modal(self.modal)", "def drawEyes(win, winW, winH):\n# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))\n leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),\n Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))\n leftEye.setFill(\"white\")\n leftEye.setOutline(\"black\")\n leftEye.draw(win)\n leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)\n 
leftIris.setOutline(\"black\")\n leftIris.setFill(\"darkcyan\")\n leftIris.draw(win)\n leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)\n leftPupil.setOutline(\"black\")\n leftPupil.setFill(\"black\")\n leftPupil.draw(win)\n rightEye = leftEye.clone()\n rightEye.move(winW/2-winW/10,0)\n rightEye.draw(win)\n rightIris = leftIris.clone()\n rightIris.move(winW/2-winW/10,0)\n rightIris.draw(win)\n rightPupil = leftPupil.clone()\n rightPupil.move(winW/2-winW/10,0)\n rightPupil.draw(win)", "def announce(outcome, who):\r\n print(who, 'rolled a', outcome)\r\n print(draw_number(outcome))", "def draw_housing():\r\n green.pensize(3)\r\n green.color(\"black\", \"darkgrey\")\r\n green.begin_fill()\r\n green.forward(80)\r\n green.left(90)\r\n green.forward(200)\r\n green.circle(40, 180)\r\n green.forward(200)\r\n green.left(90)\r\n green.end_fill()", "def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()", "def trace(self,p):\n n = self\n c=0 \n while n!=None :\n print (n)\n n = n.pere\n c+=1\n print (\"Nombre d'étapes de la solution:\", c-1)\n return", "def propose_stone(self):\n colours = ['red', 'blue']\n print(\"Set a \"+colours[player_sequence(self.number)]+\" stone.\")\n return input_stone_position()", "def hook_displaysuspicions(self):\n charMarginal = (np.array(self.hypCountByCharacter,dtype=np.float64)\n / np.sum(self.hypCountByCharacter))\n roomMarginal = (np.array(self.hypCountByRoom,dtype=np.float64)\n / np.sum(self.hypCountByRoom))\n weapMarginal = (np.array(self.hypCountByWeapon,dtype=np.float64)\n / np.sum(self.hypCountByWeapon))\n ui.plotscenariomarginals(charMarginal,roomMarginal,weapMarginal)\n ui.plotforbidden(self.forbidden)", "def play(self):\n b = int(input(\"玩几把:\"))\n flag = 0\n money = []\n while True:\n flag += 1\n histroy_, bet = self.draw()\n money.append(histroy_)\n if histroy_ < self.bet:\n print('您只能玩个 %s 把...' 
% flag)\n break\n\n if flag == b:\n print('次数已用完!!!')\n break\n\n self.plot(flag, money)", "def podziel(self):\n def fraktal(dlugosc, alpha, poziom):\n \"\"\"Metoda wyznaczajaca fraktal.\n\n Metoda ta przyjmuje dlugosc, kat oraz poziom drzewa.\n Na bazie podanych parametrow wylicza fraktal z podanych w zadaniu wzorow.\n Zwraca liste zawierajaca punkX oraz punktY fraktalu.\n \"\"\"\n#obliczanie punktow punktu Abis dla kazdego poziomu galezi\n x = float(self.p2[0] + self.dlugosc * cos(alpha))\n y = float(self.p2[1] + self.dlugosc * sin(alpha))\n return [round(x), round(y)]\n\n#petla przechodzaca po wszystkich poziomach drzewa\n while self.tmp <= self.poziom:\n#obliczanie grubosci, dlugosci galezi oraz kolorowanie jej\n self.grubosc = float((2 * self.grubosc + 1) / 3)\n self.dlugosc = float((2 * self.dlugosc) / 3)\n self.kolor += 6\n\n #sprawdzenie czy kolor nie wyszedl po za skale maksymalnej wartosci\n if self.kolor > 255:\n self.kolor = 255\n\n#rozbicie obliczen na poziom 1 i wyzej\n#Abis jest to punkt prawy dla kazdej galezi\n#B jest to punkt srodkowy dla kazdej galezi\n#C jest to punkt srodkowy dla kazdej galezi\n\n#obliczenia dla pierwszego poziomu\n if self.tmp < 2:\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.alpha, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp)]\n#obliczenia poziomow wyzej niz pierwszy\n else:\n#obliczanie kata dla punktu prawego\n self.zetprim = randint(-1, 1) * randint(1, self.s)\n self.beta = self.alpha + self.zetprim\n\n#obliczanie kata dla punktu srodkowego\n self.zetbis = randint(-1, 1) * randint(1, self.s)\n self.gamma = self.alpha + self.zetbis\n\n#obliczanie kata dla punktu lewego\n self.zetter = randint(-1, 1) * randint(1, self.s)\n self.teta = self.alpha + self.zetter\n\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.beta, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.beta, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.gamma, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.teta, self.tmp)]", "def draw():", "def drawPoles(wn):\n wn.setworldcoordinates(-1, -5, 3, 20)\n t = turtle.Turtle()\n t.speed(0)\n t.pensize(3)\n t.up()\n t.goto(-.5, 0)\n t.down()\n t.goto(2.5, 0)\n t.up()\n for i in range(3):\n t.goto(i, 0)\n t.down()\n t.goto(i, 10)\n t.up()\n t.hideturtle()", "def render_propic(self):\n if self.user_data.platform == \"steam\":\n propic_data = self.steam.get_user_propic(user_id=self.user_data.id)\n else:\n propic_data = open(\"app/modules/img/oculus.png\", \"rb\")\n propic = Image.open(propic_data)\n propic 
= propic.convert(\"RGBA\")\n propic = self._add_corners(propic, rad=30)\n return propic", "def mezclar_bolsa(self):", "def pre_draw(self):", "def step(self):\n\n percept = self.getPerception(self.agent.capteurs)\n choix = self.agent.getDecision(percept)\n self.__historique.append(((self.table, self.posAgent), choix))\n self.agent.setReward(self.applyChoix(choix))\n self.updateWorld()", "def draw(coorlist):\n tur = turtle.Turtle()\n for coor in coorlist:\n tur.penup()\n tur.goto(coor[0][0],coor[0][1])\n tur.pendown()\n tur.goto(coor[0][0],coor[1][1])\n tur.goto(coor[1][0],coor[1][1])\n tur.goto(coor[1][0],coor[0][1])\n tur.goto(coor[0][0],coor[0][1])", "def wolf_visualize_policy(policy: List[float], player: int):\n src_point = np.asarray(\n [[policy[idx][0], policy[idx][1]] for idx in range(len(policy) - 1)]\n )\n dst_point = np.asarray(\n [[policy[idx + 1][0], policy[idx + 1][1]] for idx in range(len(policy) - 1)]\n )\n\n for src, dst in zip(src_point, dst_point):\n plt.plot([src[0]], [src[1]], marker=\"o\", markersize=3, color=\"red\")\n plt.plot([src[0], dst[0]], [src[1], dst[1]], \"k-\")\n\n plt.plot(\n [dst_point[-1][0]], [dst_point[-1][1]], marker=\"o\", markersize=3, color=\"red\"\n )\n\n plt.xlim((0, 1))\n plt.ylim((0, 1))\n\n plt.xlabel(\"Pr(Rock)\")\n plt.ylabel(\"Pr(Paper)\")\n if player == 1:\n plt.title(\"RPS 1st player policy visualisation\")\n elif player == 2:\n plt.title(\"RPS 2nd player policy visualisation\")\n plt.savefig(f\"q2_wolf_agent{player}_pi.pdf\", format=\"pdf\")\n\n plt.show()", "def straight(handIn):", "def show_game_mission():\n print_bold(\"Misija:\")\n print(\"\\tOdaberi kućicu u kojoj se Talion može odmoriti ...\")\n print_bold(\"SAVJET:\")\n print(\"PAZI kako biraš jer neprijatelji su blizu!\")\n print_dotted_line()", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def _step(self, a):\n state, rew, done, info = super()._step(a)\n render = self.get_render_obs()\n return render, sum(self.rewards), bool(done), {}", "def _drawPose(self, pose, layer=0, existing=[]):\n items = existing\n if not items:\n items.extend([\n self._plt.plot([pose.x], [pose.y],\n pxMode=False,\n pen=None,\n symbolSize=_ROBOT_RADIUS,\n symbol='o',\n symbolPen=_PO_PEN,\n symbolBrush=_PO_BRUSH),\n self._plt.plot([pose.x], [pose.y],\n pen=None,\n symbol=Visualiser._triangleSymbol(pose.th),\n symbolPen=_PO_PEN,\n symbolBrush=_PO_BRUSH)\n ])\n else:\n items[-2].setData([pose.x], [pose.y])\n items[-1].setData([pose.x], [pose.y],\n symbol=Visualiser._triangleSymbol(pose.th))\n\n Visualiser._setLayer(items, layer)\n return items", "def appears(self):", "def calculateUSky(self):\n skyline = []\n for p in self.pruned:\n pastart = [self.drange[0] for i in range(self.dim)]\n pamax = p.getLocationMax()\n pdom = list(self.index.intersection(tuple(pastart+pamax),objects=True))\n if len(pdom) == 1 and pdom[0].object == p:\n skyline.append([p, 1.0])\n else:\n finalp = 0.0\n for i in range(p.getPCount()):\n base = p.getProb(i)\n loc = p.getLocation(i)\n intersec = list(self.index.intersection(tuple(pastart+loc),objects=True))\n for d in intersec:\n dobj = d.object\n if dobj != p:\n tprob = 0.0\n for idx in range(dobj.getPCount()):\n if dominateStat(dobj.getLocation(idx),loc) == True:\n tprob += dobj.getProb(idx)\n tprob = 1.0 - tprob\n base *= tprob\n finalp 
+= base\n skyline.append([p, finalp])\n for p in skyline:\n print(p[0])\n print(p[1])\n print(\"\")\n # print(skyline)", "def punch(self):\n # you are not working, futher investagtion needed...\n if self.weight < 5:\n return \"That tickles.\"\n elif self.weight < 15:\n return \"Hey that hurt!\"\n else:\n return \"OUCH!\"", "def update_goal(self):\n pass", "def drawCoordinatePlane_income():\r\n turtle = t.Screen()\r\n turtle.title(\"Life Expectancy versus Income Category\")\r\n t2.speed(0)\r\n t3.speed(0)\r\n setTurtle(t0)\r\n setTurtle(t1)\r\n setTurtle(t2)\r\n setTurtle(t3)\r\n drawAxes(t0)\r\n t1.left(90)\r\n drawAxes(t1)\r\n t0.pu()\r\n t0.fd(-80)\r\n t0.lt(90)\r\n drawlabels(t0, t1)\r\n drawPoints(t0, t1)\r\n drawIndex_income(t0, t1, t2, t3)\r\n drawIndexLines_income(t0, t1, t2, t3)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t1.lt(90)", "def create_football_field(figsize=(12*2, 6.33*2), goals=True):\n\n #pitch outline & centre line\n pitch = patches.Rectangle((-52.5, -35), 105, 70, linewidth=2,capstyle='round',\n edgecolor='w', facecolor='darkgreen')\n\n fig, ax = plt.subplots(1, figsize=figsize)\n fig.patch.set_facecolor('green')\n fig.patch.set_alpha(0.7)\n\n ## goals\n if goals:\n plt.plot([-52.5, -55, -55, -52.5], [-5, -5, 5, 5], c='w', linewidth=2)\n plt.plot([52.5, 55, 55, 52.5], [-5, -5, 5, 5], c='w', linewidth=2)\n\n ## middle line\n midline = patches.ConnectionPatch([0,-35], [0,35], \"data\", \"data\", color='white')\n\n #center circle\n centreCircle = plt.Circle((0,0), 10, color=\"white\", fill = False, linewidth=2)\n centreSpot = plt.Circle((0,0), 0.3, color=\"white\", linewidth=2)\n\n #left, right penalty area\n leftPenalty = patches.Rectangle([-52.5,-15], width=14.5, height=30, fill = False,\n color='white', linewidth=2)\n rightPenalty = patches.Rectangle([38.0,-15], width=14.5, height=30, fill = False,\n color='white', linewidth=2)\n\n #left, right 6-yard box\n leftSixYard = patches.Rectangle([-52.5,-8], width=4.5, height=16, fill=False,\n color='white', linewidth=2)\n rightSixYard = patches.Rectangle([48,-8], width=4.5, height=16, fill=False,\n color='white', linewidth=2)\n\n #penalty spots\n leftPenSpot = plt.Circle((-43.5,0),0.3, color=\"white\", linewidth=2)\n rightPenSpot = plt.Circle((43.5,0),0.3, color=\"white\", linewidth=2)\n\n element = [pitch, midline, centreCircle, centreSpot, leftPenalty, rightPenalty, leftSixYard,\n rightSixYard, rightPenSpot, leftPenSpot]\n\n for i in element:\n ax.add_patch(i)\n\n plt.xlim(-56, 56)\n plt.ylim(-37, 37)\n plt.axis('off')\n return fig, ax", "def head_with_hair(hairfunc):\n print (hairfunc())\n print (eye_narrow())\n print (nose_triangle())\n print (mouth_smile())\n print (chin_plain())" ]
[ "0.56292534", "0.5618225", "0.5552471", "0.54650366", "0.5445228", "0.54324245", "0.5407945", "0.5407678", "0.5380815", "0.5354937", "0.5342857", "0.5342857", "0.52858186", "0.52716845", "0.52669555", "0.5259555", "0.52410096", "0.52402276", "0.5233314", "0.5229365", "0.5206128", "0.5200896", "0.5200657", "0.52003425", "0.52003425", "0.52003425", "0.52003425", "0.52003425", "0.5194718", "0.51941323", "0.5180083", "0.5160251", "0.51601183", "0.5154544", "0.5128878", "0.51269644", "0.51171255", "0.5093285", "0.506927", "0.5062926", "0.506249", "0.50588113", "0.50573117", "0.50508547", "0.5050653", "0.50494236", "0.5046031", "0.50456345", "0.50424594", "0.50400007", "0.5039441", "0.5036436", "0.5015378", "0.50123626", "0.5010865", "0.4990531", "0.49903286", "0.49883774", "0.49870342", "0.4979167", "0.49774188", "0.4976725", "0.49760476", "0.4975882", "0.49756202", "0.4975467", "0.4975173", "0.49741766", "0.49645293", "0.49568164", "0.49559462", "0.49510008", "0.49413887", "0.49387488", "0.49381822", "0.49339122", "0.49321088", "0.49311316", "0.49234676", "0.4923251", "0.49222362", "0.49207157", "0.4913887", "0.49131045", "0.49104938", "0.4910002", "0.49048957", "0.49033275", "0.4903294", "0.49004707", "0.48989058", "0.4888113", "0.48880267", "0.48870492", "0.48813275", "0.4881287", "0.4878652", "0.48754284", "0.48721644", "0.4868871" ]
0.4950288
72
Returns the current date. That means every time the project is deployed, the datestamp will update. Returns a formatted date object, a la "Friday Feb. 20"
def last_update(blank):
    today = date.today()
    return today.strftime('%A %B %d')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_date():\n return datetime.datetime.today().strftime(constants.DATE_FORMAT)", "def get_date_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%d\")", "def get_current_date(fmt=\"%Y-%m-%d\"):\n return datetime.datetime.now().strftime(fmt)", "def get_date():\n now = datetime.now()\n date = now.strftime(\"%Y%m%d\")\n return date", "def date_now():\n return datetime.today().strftime('%c')", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def date() -> str:\n\n return datetime.strftime(datetime.today(), _fmt)", "def get_date():\n dt = datetime.now()\n return dt.strftime(\"%Y-%m-%d\")", "def get_date():\n return datetime.datetime.now()", "def formalDateToday():\n return dt.date.today().strftime(\"%B %d, %Y\")", "def _today() -> str:\n return strftime(DATE_FORMAT, gmtime())", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def get_date():\n return datetime.now().strftime(\"%c\")", "def today():\n today_object = datetime.utcnow()\n today_string = today_object.strftime('%m/%d/%Y')\n return today_string", "def actual_date():\n actual_date = datetime.now()\n return str(actual_date.day) + '-' + str(actual_date.month) + '-' + str(actual_date.year)", "def _getCurrentDateString(self):\n currentDateTime = datetime.now()\n return currentDateTime.strftime(\"%Y%m%d_%H%M\")", "def getDate():\n current_time = datetime.datetime.now()\n day = current_time.day\n month = current_time.month\n year = current_time.year\n date = \"{dd}-{mm}-{yyyy}\".format(dd=day,mm=month,yyyy=year)\n return date", "def _get_date():\n return datetime.datetime.now()", "def timestamp(): \n timestamp = datetime.today().strftime('%Y-%m-%d')\n \n return timestamp", "def todayDate(self):\n return time.strftime(\"%m/%d/%Y\", time.localtime())", "def todaystr():\n today = datetime.datetime.today()\n return f\"{today.year}{today.month:02}{today.day:02}\"", "def get_date():\n return str(datetime.now()).split(' ')[0]", "def get_today_date():\n return date.today()", "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" )", "def get_date():\n\n return datetime.datetime.utcnow().isoformat()", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def get_gds_current_date(self, remove_leading_zero='true'):\r\n time_now = datetime.datetime.now().time()\r\n today_2pm = time_now.replace(hour=14, minute=31, second=0, microsecond=0)\r\n if time_now < today_2pm:\r\n gds_date = datetime.datetime.now() - datetime.timedelta(days=int(1))\r\n else:\r\n gds_date = datetime.datetime.now()\r\n\r\n if remove_leading_zero.lower() == 'true':\r\n return str('{dt.day}{dt:%b}'.format(dt=gds_date).upper())\r\n else:\r\n return self._set_gds_date_format(gds_date)", "def today(self):\n return(datetime.date.today().isoformat())", "def createTimeStamp_Date():\r\n\r\n return str(datetime.now().strftime(\"%Y%m%d\"))", "def date():\r\n try:\r\n curr_date = datetime.datetime.now().strftime(\"%b %d %Y\")\r\n except Exception as e:\r\n print(e)\r\n curr_date = False\r\n return curr_date", "def default_date(self):\n return datetime.datetime.now().strftime('%Y-%m-%d')", "def date(*args):\n current_date = datetime.now().isoformat(' ').split('.')[0]\n send.system_message(current_date)", "def current_valid_date(self):\r\n return datetime.datetime.now().strftime('%Y-%m-%d')", 
"def _timestamp_now(self) -> str:\n date_now = datetime.utcnow().replace(tzinfo=timezone(\"UTC\"))\n if self._app_conf[\"aiscalator\"]:\n pst = timezone(self.app_config().timezone)\n else:\n pst = timezone('Europe/Paris')\n return date_now.astimezone(pst).strftime(\"%Y%m%d%H%M%S\")", "def get_today():\n return datetime.today()", "def get_now():\n\treturn datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "def today(cls):\n return date()", "def get_today() -> datetime.date:\n return datetime.date.today()", "def get_now():\r\n now = dt.datetime.now()\r\n now_str = now.strftime(\"%d/%m %H:%M\")\r\n return now_str", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def _today() -> datetime.date:\n return datetime.today().date()", "def TODAY():\n return datetime.date.today()", "def date_stamp():\n return datetime.fromtimestamp(time()).strftime('%Y.%m.%d')", "def current_day():\n now = pytz.timezone('America/Los_Angeles').localize(datetime.now())\n return now.strftime('%m/%d')", "def get_today(self):\n # using now() to get current time\n current_time = datetime.datetime.now()\n day = str(current_time.day)\n month = str(current_time.month)\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n return str(current_time.year) + month + day", "def today():\n return date.today()", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def date_printer():\n curr_date = datetime.date.today()\n\n print(format_date(curr_date))", "async def date(self) -> dt.date:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).date()", "def get_pub_date():\n return datetime.datetime.now()", "def today():\n return datetime.today()", "def build_date(self) -> str:\n data = \"none yet\"\n if self.STARTED:\n data = self.about.get(\"Build Date\", \"UNKNOWN\")\n return data", "def _create_time_stamp() -> str:\n\n return datetime.datetime.now().strftime(\"%Y%m%d\")", "def getApplicationBuildDate(self) -> unicode:\n ...", "def get_date():\n now=datetime.now()\n s=\"%s%s%s\" % (now.year, str(now.month).zfill(2), str(now.day).zfill(2))\n return (now, s)", "def get_date(format_of_date):\n current_date = datetime.datetime.today().strftime(format_of_date) # \"%d%m%Y\"\n return current_date", "def utc_today_str():\n return datetime.datetime.strftime(datetime.datetime.utcnow(), \"%Y-%m-%d\")", "def get_date_DM(): \n \n now = date.datetime.now()\n date_DM = str(now.day)+'_'+str(now.month)+'/' \n return date_DM", "def _create_date(self):\n return strftime(\"%a, %d %b %Y %H:%M:%S GMT\", gmtime())", "def get_datecode():\n now = datetime.utcnow()\n return now.strftime(\"%Y%m%d\")", "def get_current_datetime ( ) :\n return datetime.datetime.now( )", "def current_datetime(self):\n return DateAccessor().today()", "def now():\n return datetime.now().strftime(FORMAT)", "def get_current_day():\n current_day = datetime.now().strftime('%A').lower()\n return current_day", "def get_now():\n return datetime.now()", "def get_now():\n return datetime.now()", "def get_date_time():\n date_time = datetime.now()\n date_time_string = date_time.strftime(\"%b-%d-%Y (%H:%M:%S)\")\n return date_time_string", "def static_now():\n return datetime.datetime(2000, 9, 4).replace(tzinfo=timezone.utc)", "def timestamp_now():\n return datetime.now().strftime(\"%A, %B %d, %Y, %I:%M %p\")", "def now_datetime():\n now = datetime.datetime.now()\n return now.strftime('%Y%m%d%H%M%S')", "def today_string(fmt='%Y-%m-%d'):\n return brasilia_time().strftime(fmt)", "def 
__get_settlement_date():\n day_after_tomorrow = datetime.now(timezone.utc).date() + \\\n timedelta(days=2)\n settlement_date = day_after_tomorrow.strftime(\"%Y%m%d\")\n\n return settlement_date", "def now():\n return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')", "def get_current_timestamp_str(self):\n return str(time.mktime(datetime.datetime.now().timetuple()))", "def get_time_stamp_str() -> str:\n return datetime.datetime.now().strftime(DateFormat)", "def nowdt():\n from datetime import datetime\n\n now = datetime.now()\n return now.strftime(\"%d/%m/%Y %H:%M:%S\")", "def today():\n today = datetime.utcnow() \n return datetime(today.year, today.month, today.day)", "def get_fecha_actual():\n hoy = datetime.datetime.now()\n fecha_actual = hoy.strftime(\"%d-%m-%Y\")\n return fecha_actual", "def time_stamper() :\n\treturn datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")", "def db_date():\n return datetime.datetime.now()", "def get_datetime_string():\n return datetime.now().strftime(DATETIME_FORMAT)", "def now():\n now = datetime.datetime.now()\n return \"%04d-%02d-%02d %02d:%02d:%02d.%03d\" % ( now.year, now.month,now.day,\n now.hour,now.minute,now.second,int(now.microsecond/1e3))", "def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} {day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)", "def now():\r\n return datetime.datetime.now()", "def date(self):\r\n year = int(datetime.datetime.now().year)\r\n month = int(datetime.datetime.now().month)\r\n date = int(datetime.datetime.now().day)\r\n month_list = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n self.speak(\"the current date is\")\r\n self.speak(date)\r\n self.speak(month_list[month - 1])\r\n self.speak(year)", "def get_todays_date(self):\r\n \r\n date=str(dt.datetime.today())\r\n raw_date=date.split(\" \")[0]\r\n Day=raw_date.split(\"-\")[-1]\r\n Month=raw_date.split(\"-\")[-2]\r\n Year=raw_date.split(\"-\")[-3]\r\n todays_date=Day+\"-\"+Month+\"-\"+Year\r\n return todays_date", "def now(self):\n os.environ['TZ'] = conf.timezone\n time.tzset()\n return time.strftime(\"%B %d %Y %H:%M:%S IST\", time.localtime())", "def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")", "def get_now():\n right_now = datetime.datetime.now()\n return (\"%04d%02d%02d-%02d:%02d:%02d\"\n % (right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))", "def get_now() -> str:\n global _NOW\n if _NOW is None:\n _NOW = str(datetime.now().replace(microsecond=0))\n return _NOW", "def now():\n return datetime.datetime.now()", "def get_date_display(self, context):\n return '{year}/{month}/{day}'.format(\n year=self.get_year(),\n month=self.get_month().zfill(2),\n day=self.get_day().zfill(2))", "def currentDay(self):\n day = datetime.datetime.today().day\n return day", "def getdate():\n return strftime(\"%A %B %d, %I:%M %p\")", "def date_time():\n\n\treturn datetime.now().strftime(\"%d%m%Y_%H%M%S\")", "def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'", "def get_timestamp():\n return datetime.now().strftime(TIMESTAMP_FORMAT)", "def get_date():\n\n return tz.get_brisbane_time().date()", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def 
get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())" ]
[ "0.7824893", "0.7507471", "0.7391598", "0.73774505", "0.73269105", "0.7283762", "0.72370684", "0.7218445", "0.7215881", "0.71713823", "0.7162151", "0.7127916", "0.7113581", "0.71124613", "0.71002764", "0.70509845", "0.7046391", "0.702825", "0.6980741", "0.69473976", "0.69437426", "0.6925347", "0.69251144", "0.69204974", "0.6905852", "0.6868918", "0.6865973", "0.684093", "0.6820278", "0.6808474", "0.68077207", "0.6803865", "0.67872465", "0.6782503", "0.67730933", "0.67287856", "0.6726859", "0.6686163", "0.667659", "0.66734314", "0.6665396", "0.66457653", "0.663833", "0.6637698", "0.66359746", "0.663268", "0.662306", "0.6616052", "0.6613909", "0.6585216", "0.6561325", "0.65592515", "0.65192527", "0.65140027", "0.651358", "0.6510203", "0.64973927", "0.6466341", "0.64488703", "0.6448333", "0.64481276", "0.64441293", "0.6442378", "0.63918215", "0.638668", "0.638668", "0.6369269", "0.63599753", "0.6351102", "0.6330363", "0.6328739", "0.6325732", "0.632564", "0.6325078", "0.63188475", "0.63086134", "0.6307574", "0.6306539", "0.63041246", "0.63008803", "0.6287201", "0.6278705", "0.6262907", "0.6255015", "0.6254857", "0.62511986", "0.62218076", "0.6201069", "0.6186023", "0.6179282", "0.6170325", "0.6162791", "0.6160196", "0.6158259", "0.6151326", "0.6146105", "0.61366844", "0.61302865", "0.6119882", "0.6114729" ]
0.6653718
41
What's the current date and time?
def timestamp(blank):
    today = datetime.today()
    return today.strftime("%A %B %d, %-I:%M %p")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_time():\n return datetime.now()", "def get_current_time():\n return datetime.datetime.now()", "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\n return now", "def current_time():\n return time.time()", "def get_current_datetime ( ) :\n return datetime.datetime.now( )", "def current_time():\n now = datetime.datetime.now()\n time = now.strftime(\"%Y-%m-%d %H:%M:%S:%f\")\n return time", "def now():\r\n return datetime.datetime.now()", "def get_now():\n\treturn datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "def now():\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))", "def time_now():\n return datetime.datetime.now().time()", "def getdate():\r\n import datetime\r\n return datetime.datetime.now()", "def get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())", "def now():\r\n return time.time()", "def now(self):\r\n return time.ctime(time.time())", "def now():\n return datetime.datetime.now()", "def now():\n return time.strftime(\"%Y_%m_%d_%H_%M_%S\")", "def nowdt():\n from datetime import datetime\n\n now = datetime.now()\n return now.strftime(\"%d/%m/%Y %H:%M:%S\")", "def now_datetime():\n now = datetime.datetime.now()\n return now.strftime('%Y%m%d%H%M%S')", "def _get_current_time() -> str:\n return datetime.now().strftime(\"%FT%H:%M:%S\")", "def time_now():\n cur_time = str(datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\"))\n return cur_time", "def current_time(self):\n return self._current_time", "def get_current_time(self):\n return self.time", "def time_now():\n return time.time()", "def now():\n now = datetime.datetime.now()\n return \"%04d-%02d-%02d %02d:%02d:%02d.%03d\" % ( now.year, now.month,now.day,\n now.hour,now.minute,now.second,int(now.microsecond/1e3))", "def get_time():\n return datetime.datetime.now()", "def now(self):\n os.environ['TZ'] = conf.timezone\n time.tzset()\n return time.strftime(\"%B %d %Y %H:%M:%S IST\", time.localtime())", "def get_date():\n return datetime.datetime.now()", "def get_now():\n return datetime.now()", "def get_now():\n return datetime.now()", "def time_now():\n ts = datetime.datetime.now().timetuple()\n return '{wday} {day} {month} {year} {hour}:{minute:0>2d}:{second:0>2d} UTC'.format(\n year=ts.tm_year, month=calendar.month_name[ts.tm_mon],\n day=ts.tm_mday, wday=calendar.day_name[ts.tm_wday],\n hour=ts.tm_hour, minute=ts.tm_min, second=ts.tm_sec)", "def currentTime():\n zone='America/Bogota'\n getDate = datetime.now(pytz.timezone(zone));\n #Format -> d/m/Y H:M:S\n return getDate", "def now(self):\n return time.strftime(r'[%d/%b/%Y:%H:%M:%S]')", "def get_now():\r\n now = dt.datetime.now()\r\n now_str = now.strftime(\"%d/%m %H:%M\")\r\n return now_str", "def timestamp_now():\n return datetime.now().strftime(\"%A, %B %d, %Y, %I:%M %p\")", "def date_now():\n return datetime.today().strftime('%c')", "def time_now() -> str:\n return datetime_to_str(datetime_now())", "def get_current_date():\n return datetime.datetime.today().strftime(constants.DATE_FORMAT)", "def now(self):\n return datetime.datetime.now()", "def now(self):\n return datetime.datetime.now()", "def get_current_time():\n return int(time.time())", "def now():\n return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')", "def get_now_time():\r\n return '[' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + ']: '", "def currentTime():\n return strftime(\"%H:%M:%S\", time.localtime())", "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" 
)", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def current_datetime(self):\n return DateAccessor().today()", "def now():\n return utcfromtimestamp(time.time())", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def get_current_time():\n dateTime = datetime.datetime.now()\n # \"%Y-%m-%d %H:%M:%S:%f\" is default formatting with everything\n dateTime = dateTime.strftime(\"%m-%d-%y %H:%M:%S\")\n\n logger.logger.debug(\"Getting current time: {}\".format(dateTime))\n\n return dateTime", "def get_now():\n right_now = datetime.datetime.now()\n return (\"%04d%02d%02d-%02d:%02d:%02d\"\n % (right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))", "def date_time():\n\n\treturn datetime.now().strftime(\"%d%m%Y_%H%M%S\")", "def now():\n return datetime.datetime.utcnow()", "def _get_date():\n return datetime.datetime.now()", "def now():\n\treturn time.time() * 1000", "def now():\n if os.sys.platform == 'win32':\n return time.clock() # best for windows? seems to give finer temporal resolution.\n else:\n return time.time() # best for Unix, others???", "def current_timestamp(self) -> time:\n\n return time.time()", "def get_system_date_and_time(self):\n return self.mycam.devicemgmt.GetSystemDateAndTime()", "def getCurrentTime():\n\tnow = datetime.datetime.now()\n\thr = now.hour\n\tgreeting = \"\"\n\tampm = \"\"\n\tif (hr < 12): #morning\n\t\thr = hr\n\t\tgreeting = \"morning\"\n\t\tampm = \"am\"\n\telif (hr >= 12 and hr < 1): #afternoon\n\t\thr = hr\n\t\tgreeting = \"afternoon\"\n\t\tampm = \"noon\"\n\telif (hr > 12 and hr < 19): #evening\n\t\thr = hr - 12\n\t\tgreeting = \"evening\"\n\t\tampm = \"pm\"\n\telse: #night\n\t\thr = hr - 12\n\t\tgreeting = \"night\"\n\t\tampm = \"pm\"\n\treturn str(hr) + ':' + str(now.minute),ampm, ' in the ', greeting", "def _Now():\n return datetime.datetime.utcnow()", "def now():\n return datetime.now().strftime(FORMAT)", "def current_time(cls) -> float:", "def get_date():\n return datetime.now().strftime(\"%c\")", "def time_date(self):\r\n from datetime import date\r\n from datetime import datetime\r\n\r\n self.today = date.today() # Output is: 2020-05-19\r\n # Need to change that in a format 19/05/2020\r\n self.today_format = self.today.strftime(\"%d/%m/%Y\") #Output is: 19/05/2020\r\n\r\n self.now = datetime.now()\r\n self.current_time = self.now.strftime(\"%I:%M:%S %p\") # %I- Hour 12hr foemat %M- minitue %S- sec %p- AM/PM\r\n # Both self.current_time and self.today_format are in string format\r\n\r\n return self.current_time, self.today_format", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")", "def str_current_time():\n return strftime(\"%Y_%m_%d_%H_%M_%S_%Z\", gmtime())", "def get_time(self):\n\t\treturn time.time()", "def tnow():\n return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')", "def now (self):\n return datetime.datetime.utcnow ()", "def current_datetime(request):\n now = datetime.datetime.now()\n context = {'page_title': 'Current date time',\n 'current_time': now\n }\n return render(request, 'books/time.html', context)", "def _getCurrentDateString(self):\n currentDateTime = datetime.now()\n return currentDateTime.strftime(\"%Y%m%d_%H%M\")", "def today():\n return datetime.today()", "def now(self, message=\"Now: \"):\n return message + \": \" + 
str(datetime.datetime.now())", "def get_date():\n return str(datetime.now()).split(' ')[0]", "def today(cls):\n return date()", "def time(self):\r\n now = datetime.datetime.now()\r\n month = rfc822._monthnames[now.month - 1].capitalize()\r\n return ('[%02d/%s/%04d:%02d:%02d:%02d]' %\r\n (now.day, month, now.year, now.hour, now.minute, now.second))", "def formatted_time() -> datetime.datetime:\r\n return datetime.datetime.now()", "def get_today():\n return datetime.today()", "def current_time():\n\n return int(1000 * time())", "def dbCurrentTime():\n return datetime.datetime.utcnow()", "def get_date_time(self):\n now = datetime.datetime.now()\n self.get_current().insert('insert', str(now.strftime(\"%I:%M %p %d-%m-%Y\")))", "def time_at_server():\n return time.asctime()", "def now(self):\n year = self.get_year()\n month = self.get_month()\n day = self.get_day()\n hour = self.get_hour()\n minute = self.get_minute()\n second = self.get_second()\n\n return (year, month, day, hour, minute, second, 0, 0)", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def get_current_time():\n\n now = dt.datetime.now()\n total_time = (now.hour * 3600) + (now.minute * 60) + (now.second)\n return total_time", "def today():\n return date.today()", "def now_s():\n return calendar.timegm(now_dt().utctimetuple())", "def now(self, request):\n identity = self.bot.get_plugin('identity').get_identity_by_request(request)\n\n now = times.now()\n tz = self._get_user_timezone(identity)\n local = times.to_local(now, tz)\n\n request.respond('Server time: {}\\nLocal time:{}'.format(now, local))", "def curr_time():\r\n try:\r\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n except Exception as e:\r\n print(e)\r\n curr_time = False\r\n return curr_time", "def db_date():\n return datetime.datetime.now()", "def get_current_date(fmt=\"%Y-%m-%d\"):\n return datetime.datetime.now().strftime(fmt)", "def get_current_time_parameters():\n \n current_time = datetime.datetime.now()\n \n return {\n \"day\": current_time.day,\n \"month\": current_time.month,\n \"year\": current_time.year,\n \"hour\": current_time.hour,\n \"minute\": current_time.minute,\n \"second\": current_time.second\n }", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def _today() -> str:\n return strftime(DATE_FORMAT, gmtime())", "def today(self):\n return(datetime.date.today().isoformat())", "def get_time():\n\n time_format = \"%Y-%m-%d %H:%M:%S\"\n now = str(datetime.datetime.now().strftime(time_format))\n\n return now", "def get_time():\r\n \r\n dt = datetime.datetime.now()\r\n dt_parsed = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return dt_parsed", "def get_time(self):\n return self._current_time_sec", "def now():\n return datetime.datetime.now(pytz.utc)" ]
[ "0.82750773", "0.8209311", "0.80392706", "0.8034834", "0.8023809", "0.8011492", "0.7957189", "0.7878803", "0.787236", "0.7862835", "0.78527325", "0.78244144", "0.78027594", "0.7793225", "0.7789877", "0.7701877", "0.7679058", "0.76773113", "0.7676556", "0.7654265", "0.765412", "0.7653136", "0.7638145", "0.7633823", "0.76332694", "0.7604462", "0.7594048", "0.75670576", "0.75670576", "0.7565505", "0.75489146", "0.75462186", "0.7535679", "0.75150025", "0.7511738", "0.7505195", "0.7494416", "0.7493078", "0.7493078", "0.7491337", "0.7478628", "0.74685955", "0.74371934", "0.743026", "0.740809", "0.74023527", "0.74005514", "0.739449", "0.739449", "0.7393566", "0.73842514", "0.73747075", "0.7373905", "0.7353008", "0.7328486", "0.7313341", "0.7282247", "0.7259651", "0.72546035", "0.7244178", "0.7242947", "0.7238452", "0.7237827", "0.72257465", "0.7225634", "0.7216693", "0.72052675", "0.7202915", "0.7179487", "0.7158413", "0.7132617", "0.71301293", "0.710961", "0.7082368", "0.7067771", "0.7067597", "0.7060235", "0.70448583", "0.7037523", "0.70322424", "0.69963515", "0.6988818", "0.69809514", "0.6980326", "0.69789284", "0.69789284", "0.69752145", "0.6950934", "0.69483405", "0.69434965", "0.69267344", "0.69221157", "0.69136524", "0.6902447", "0.68936956", "0.68933815", "0.6892099", "0.6889352", "0.68891215", "0.687505", "0.68700933" ]
0.0
-1
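The `timestamp` document in the row above is only a fragment: it assumes `datetime` is already imported and never uses its `blank` parameter. A minimal self-contained sketch of how that snippet would run — with an assumed `None` default added for the unused parameter — is:

from datetime import datetime


def timestamp(blank=None):
    # Render the current local time, e.g. "Friday June 13, 3:07 PM".
    # "%-I" (hour without zero padding) is a glibc/BSD strftime extension;
    # on Windows the closest equivalent is "%#I".
    today = datetime.today()
    return today.strftime("%A %B %d, %-I:%M %p")


if __name__ == "__main__":
    print(timestamp())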
Take a number such as 62 and return 62nd. 63, 63rd etc.
def ordinal_filter(value):
    digit = value % 10
    if 10 < value < 20:
        o = 'th'
    elif digit is 1:
        o = 'st'
    elif digit is 2:
        o = 'nd'
    elif digit is 3:
        o = 'rd'
    else:
        o = 'th'
    return '%d%s' % (value, o)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_digit(n):\n\n return n % 10", "def get_dig_num(num, n = 1):\n digit = num//10**n%10 # this is the n-th digit, 0-indexed\n return digit", "def digit(number: int, n: int) -> int:\n return number // 10 ** n % 10", "def last_n_digits(num, n):\n return num%(10**n)", "def get_nth_digit(n, digit):\n n = n / 10**(digit-1)\n return n % 10", "def find_num(n: int) -> int:\n n = n - 54 * (n // 54)\n n = n - 6 * (n // 6)\n flat_nums = {1:1,\n 2:2,\n 3:3,\n 4:4,\n 5:5,\n 0:6}\n return(flat_nums[n%6])", "def compute(num):\n # 567 / 9 = 63, 235 / 47 = 5\n num = (num * 63 + 7492) * 5 - 498\n if num < 0: # modulus won't give correct result if number is negative\n num *= -1\n res = (num // 10) % 10\n return res", "def returnOnceDigit(no):\n \n div = 1\n for i in range(1,len(str(no))):\n div = div * 10\n no = int(no)\n while(not no < 10):\n no = no % div\n div = div/10\n return int(no)", "def fn(n):\n digits = [int(x) for x in str(n)]\n for i in reversed(range(len(digits)//2+1)): \n if digits[i] < 9: break \n else: return 10*n + 11\n digits[i] = digits[~i] = digits[i] + 1\n for ii in range(i): \n digits[~ii] = digits[ii]\n for ii in range(i+1, len(digits)//2+1): \n digits[ii] = digits[~ii] = 0\n return int(\"\".join(map(str, digits)))", "def v2r(n, base):\n b = len(base)\n digits = ''\n while n > 0:\n digits = base[n % b] + digits\n n = n // b\n return digits", "def first_n_digits(num, n):\n return num // 10 ** (int(math.log(num, 10)) - n + 1)", "def cut_number(n):\n i = 0\n while True:\n cur = n % 10\n ahead = n // 10 % 10\n if cur > ahead:\n i += 1\n break\n i += 1\n n //= 10\n return i", "def get_digit(n):\n \"\"\"\n 9 - 9\n 10-99 - 2*90 \n 100-999 - 3*900\n \"\"\"\n i = 0\n start = 0\n done = False\n while not done:\n step = (i+1)*9*(10**i)\n if start + step > n:\n done = True\n else:\n start += step\n i += 1\n\n num_digits = i + 1\n\n offset = n - start\n number = offset // num_digits + \\\n (10**(num_digits-1) if (num_digits - 1) else 0)\n _i = (offset - 1) % num_digits\n return int(str(number)[_i])", "def get_digits(num: int) -> str:\n return \"\".join(sorted(str(num**3)))", "def modifier(base):\n return int(math.floor((base - 10) / 2))", "def r2v(digits, base):\n b = len(base)\n n = 0\n for d in digits:\n n = b * n + base[:b].index(d)\n return n", "def last_digits(x,y,n):\n\tr = modexp(x,y, 10**n)\n\n\t#post processing to make sure we didn't cut off a leading 0\n\treturn str(r).zfill(n)", "def solution():\n product = 1\n\n digits = (1, 10, 100, 1000, 10000, 100000, 1000000)\n n = 0\n position = 0\n for digit in digits:\n # increment n until we reach the digit we are interested in\n while digit > position:\n n+=1\n position +=len(str(n))\n # The digit we want is in str(n). If position == digit, then\n # it is the last digit of n. 
If position > digit, we want the\n # (position - digit)'th last digit of n.\n offset = position - digit\n d_n = int(str(n)[-1-offset])\n # update the product\n product *= d_n\n return product", "def int2dec(n: int) -> str:", "def reverse_digits(number: int):\n acc = 0\n\n while number != 0:\n acc *= 10\n acc += number % 10\n\n number //= 10 # 정수 나눗셈\n\n return acc", "def get_tens_ones_digits(number):\n return int(number/10), number // 10**0 % 10", "def decomp(num):\n base = 10 ** (len(str(num))-1)\n divisor, resto = divmod(num, base)\n return divisor * base, resto", "def mirror(n):\n return (n % 10)*10 + (n // 10)", "def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n currentnum = num\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n if not num:\n return '0'\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n return converted_string", "def base10toN(num, base):\n\n converted_string, modstring = \"\", \"\"\n\n currentnum = num\n\n if not 1 < base < 37:\n raise ValueError(\"base must be between 2 and 36\")\n\n if not num:\n return '0'\n\n while currentnum:\n mod = currentnum % base\n currentnum = currentnum // base\n converted_string = chr(48 + mod + 7*(mod > 10)) + converted_string\n\n return converted_string", "def _algorithm(self, rut):\n suma = 0\n multi = 2\n for r in rut[::-1]:\n suma += int(r) * multi\n multi += 1\n if multi == 8:\n multi = 2\n return u'0123456789K0'[11 - suma % 11]", "def esrever2(n, s):\n if n == 0:\n return s\n else:\n result = esrever2(n // 10, s * 10 + n % 10)\n return result", "def get_n_digit(num):\n cnt = 0\n while num & 1 != 1:\n num >>= 1\n cnt += 1\n # print(cnt)\n return cnt", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def ToBase(b, n):\r\n d = []\r\n while n:\r\n d.append(n % b)\r\n n //= b\r\n d.reverse() \r\n return int(''.join(map(str, d)))", "def get_ordinal(n):\n n = int(n)\n # Zeroeth, First, Second, Third, Fourth+\n ordinal = ['th', 'st', 'nd', 'rd', 'th'][min((n % 10), 4)]\n # all the teenths that don't match the above pattern\n if (n % 100) in [11, 12, 13]:\n ordinal = 'th'\n return f\"{n}{ordinal}\"", "def convert_base(num, n):\r\n new_num_string = ''\r\n current = num\r\n while current != 0:\r\n remainder = current % n\r\n if remainder > 9:\r\n remainder_string = HEX_CHARS[remainder]\r\n elif remainder >= 36:\r\n remainder_string = '('+str(remainder)+')'\r\n else:\r\n remainder_string = str(remainder)\r\n new_num_string = remainder_string+new_num_string\r\n current = current//n\r\n return new_num_string", "def karana(n):\n if n == 1:\n return 0\n elif n > 57:\n return n - 50\n else:\n return amod(n - 1, 7)", "def CLng(num):\n return int(round(float(num)))", "def problem_52():\n\n for number in xrange(1, 123456789):\n sorted_num = ''.join(sorted(str(number)))\n if len([value for value in xrange(2, 7)\n if ''.join(sorted(str((value * number)))) == sorted_num]) == 5:\n return number", "def get_base_2(n):\n return str(bin(int(n))).removeprefix('0b')", "def print_last_digit(number):\n\n ld = abs(number) % 10\n print(ld, end=\"\")\n return ld", "def getNumber():", "def test_right_twos_to_int(self):\n self.assertEqual(utils.twos_to_int('101'.zfill(8)), 5)", "def get_oglindit(numar):\n if numar < 0:\n return numar\n numar_str = str(numar)\n numar_str = numar_str[::-1]\n return int(numar_str)", "def num_reverse(num):\r\n return int(str(num)[::-1])", "def 
perfect_number(base):\n\tif type(base) is not int or base < 0:\n\t\treturn None\n\tbase = str(base)\n\tlt = [int(x) for x in base]\n\tif sum(lt) > 10:\n\t\treturn None\n\telse:\n\t\tlt.append(10-sum(lt))\n\tlt = [str(x) for x in lt]\n\treturn int(\"\".join(lt))", "def residuo_cero(numero):\n for x in range (1,10):\n if(numero % x == 0):\n return x \n return numero", "def rounddown(number):\n\n # Return the answer\n return math.floor(number)", "def f(n):\n\tfor i in range(101, n):\n\t\tif (i % 21 == 0):\n\t\t\treturn i", "def findlastdigit(barcode):\r\n splittuple = splitOddEven(barcode)\r\n oddsum = partSum(splittuple[0], 1)\r\n evensum = partSum(splittuple[1], 3)\r\n totalsum = oddsum + evensum\r\n lastdigitfind = 10 - (totalsum % 10)\r\n if lastdigitfind == 10:\r\n lastdigitfind = 0\r\n return lastdigitfind", "def count_digit(x, i):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n su = 0\n s = 0\n k = x\n while(i>1):\n x = x//10\n i = i-1\n s = x%10\n while(k>0):\n if((k%10)==s):\n su = su + 1\n k = k//10\n return su", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def last_8(some_int):\n\n return int(str(some_int)[-8:])", "def baseConverter(number, base):\n\n digits = \"0123456789ABCDEF\"\n\n remainders = Stack()\n\n while number > 0:\n rem = number % base\n remainders.push(rem)\n number = number // base\n\n result = \"\"\n\n while not remainders.isEmpty():\n popped = remainders.pop()\n digit = digits[popped]\n result += str(digit)\n return result", "def baseconvert(num, base):\n\n digits = \"0123456789abcdefghijklmnopqrstuvwxyz\"\n\n try:\n num = int(num)\n base = int(base)\n except ValueError:\n return \"\"\n\n if num < 0 or base < 2 or base > 36:\n return \"\"\n\n num_string = \"\"\n while 1:\n remainder = num % base\n num_string = digits[remainder] + num_string\n num = num / base\n if num == 0:\n break\n\n return num_string", "def fo_shizzle_my_nizzle(n): \n if n < 0:\n n = \"fo\"\n elif n >= 1 and n < 50: \n n = \"shizzle\"\n elif n >= 50 and n <= 100:\n n = \"my\"\n elif n % 2 == 0 and n % 3 == 0 and n > 100:\n n = \"nizzle\"\n else:\n n = \"\"\n return n", "def sixteen():\r\n \r\n number = str(pow(2, 1000))\r\n sum = 0\r\n \r\n for i in number:\r\n sum += int(i)\r\n \r\n return sum", "def get_digits(i):\n unit = i % 10\n hundred = i // 100\n ten = (i - hundred*100) // 10\n return hundred, ten, unit", "def dec2int(r: str) -> int:", "def reverse(n):\n return(int(str(n)[::-1]))", "def mode_digit(n):\n temp = dict()\n result = 0\n most = 0\n n = abs(n)\n while n != 0:\n val = n % 10\n if val in temp:\n temp[val] += 1\n else:\n temp[val] = 1\n n = n // 10\n for k, v in temp.items():\n if v >= most:\n if v == most:\n if k > result:\n result = k\n else:\n most = v\n result = k\n return result", "def next_integer(n):\n if n % 2 == 0:\n return n // 2\n else:\n return 3 * n + 1", "def dec2base(n, base):\n convertstring = \"0123456789ABCDEF\"\n if n < base:\n return convertstring[n]\n else:\n return dec2base(n // base, base) + convertstring[n % base]", "def convert_base(num, to_base):\n\n\tdigits = '0123456789ABCDEF'\n\tresult = ''\n\n\tif num < to_base:\n\t\treturn digits[num]\n\telse:\n\t\tresult += convert_base(num/to_base, to_base) + str(digits[num % to_base])\n\n\treturn result", "def calc_check_digit(number):\n weights = (2, 4, 8, 5, 10, 9, 7, 3, 6)\n return str(sum(w * int(n) for w, n in zip(weights, number)) % 11 % 10)", "def _get_b26_num(rem, N):\n if N>0:\n pexp = 26**N\n remainder = (rem % pexp) \n 
return chr(97 + (rem // pexp)) + PyJSplit._get_b26_num(remainder, N-1) \n else: \n return chr(97 + rem)", "def _num(self):\n try:\n num = int(self.__rId[3:])\n except ValueError:\n num = 9999\n return num", "def convertebase10basen(basedest, numero):\n ret = \"\"\n while True:\n digit = numero%basedest\n ret = ret + DIGITOS[digit]\n numero = numero // basedest\n if numero == 0:\n break\n return ret[::-1]", "def calc_check_digit(value):\n check_digit = 0\n odd_pos = True\n for char in str(value)[::-1]:\n if odd_pos:\n check_digit += int(char) * 3\n else:\n check_digit += int(char)\n odd_pos = not odd_pos # alternate\n check_digit = check_digit % 10\n check_digit = 10 - check_digit\n check_digit = check_digit % 10\n return check_digit", "def isbn_13_check_digit(twelve_digits):\r\n if len(twelve_digits) != 12: return None\r\n try: int(twelve_digits)\r\n except: return None\r\n thirteenth_digit = 10 - int(sum((i % 2 * 2 + 1) * int(x) for i, x in enumerate(twelve_digits)) % 10)\r\n if thirteenth_digit == 10: thirteenth_digit = '0'\r\n return str(thirteenth_digit)", "def normexponent(val):\n n = np.log10(val)\n if n < 0:\n n = int(n) - 1\n else:\n n = int(n)\n return n", "def num_digits(num):\r\n if num == 0:\r\n return 1\r\n return int(log10(num)+1)", "def pseudo_int(string_num):\r\n int_num = 0\r\n reversed_string_num = string_num[::-1] # begin read the characters from the end of the string.\r\n for indexx in range(len(string_num)):\r\n digit = reversed_string_num[indexx]\r\n int_num += (ord(digit) - ord('0')) * 10**indexx # '2698' => 8 * 10**0 + 9 * 10**1 + 6 * 10**2 + 2 * 10**3 = 2698\r\n return int_num", "def dec2FactString(nb):\n num_str = '0'\n if nb <= 0:\n return num_str\n \n # find largest factorial base\n largest_base = 0\n while nb >= factorial(largest_base):\n largest_base += 1\n largest_base -= 1\n \n digit = ['0'] * largest_base\n digit[0] = str(nb / factorial(largest_base))\n remainder = nb % factorial(largest_base)\n for i in range(largest_base - 1, 0, -1):\n digit[largest_base - i] = str(remainder / factorial(i))\n remainder = remainder % factorial(i)\n for i in range(0, len(digit)):\n if int(digit[i]) > 9:\n digit[i] = chr(int(digit[i]) + 55)\n return \"\".join(digit) + '0", "def solve(number):\n if number == 0:\n return \"INSOMNIA\"\n else:\n total_digits = 10 # there are 10 digits [0-9]\n digits_seen = set()\n multiplier = 0\n while len(digits_seen) < total_digits:\n multiplier += 1\n digits_in_n = {int(i) for i in str(multiplier*number)}\n digits_seen = digits_seen.union(digits_in_n)\n return multiplier*number", "def decimal_to_ndom (a):\r\n ndo=\"\" \r\n num=a//6\r\n rem=a%6\r\n ndo=ndo+str(rem)\r\n num2=num//6\r\n rem=num%6\r\n ndo=ndo+str(rem) \r\n num3=num2//6\r\n rem=num2%6\r\n ndo=ndo+str(rem)\r\n ndo= ndo[::-1]\r\n if(ndo[0]=='0'):\r\n if(ndo[1]=='0'):\r\n ndom=ndo[2]\r\n else:\r\n ndom=ndo[1:]\r\n else:\r\n ndom=ndo\r\n return ndom", "def find_max_tidy_num(s_number):\n\n len_input = len(s_number) - 1\n\n if len_input == 0:\n return s_number\n\n for i in range(0, len_input):\n if int(s_number[i]) > int(s_number[i+1]):\n\n final_str = '9' * (len_input - i)\n s_number = s_number[:(i+1)]\n\n return ''.join([find_max_tidy_num(str(int(s_number)-1)), final_str])\n\n return s_number", "def _num_factor(number, factor):\n assert factor != 0\n return number // factor", "def baseN(num, b, numerals=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n neg = num < 0\n num = abs(num)\n val = ((num == 0) and numerals[0]) or (baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % 
b])\n return '-' + val if neg else val", "def lastDigit(barcode):\r\n lastnumber = int(barcode[-1])\r\n return lastnumber", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def int_to_base(num, base):\n if base<=0: return '0' \n digits = []\n if (num <0):\n \tcur= -num\n else: cur = num\n while(cur>0):\n\t\tdigits.append(str(cur%base))\n\t\tcur/=base\n if (num <0): digits.append('-')\n digits.reverse()\n\n \n \n return ''.join(digits)", "def convert(self, base):\n number = self.number\n converted_number = []\n while number:\n rest = number % base\n converted_number.append(rest)\n number = number / base\n return converted_number[::-1]", "def get_digit_prefix(characters):\n value = 0\n while characters and characters[0].isdigit():\n value = value * 10 + int(characters.pop(0))\n return value", "def apnumber(value):\r\n try:\r\n value = int(value)\r\n except (TypeError, ValueError):\r\n return value\r\n if not 0 < value < 10:\r\n return str(value)\r\n return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),\r\n _('seven'), _('eight'), _('nine'))[value - 1]", "def ordinal_conversion(value):\n last_digit = value.group(0)[-1]\n value_map = {'1': 'st', '2':'nd', '3':'rd'}\n if value_map.get(last_digit, False):\n return value.group(0) + value_map[last_digit]\n else:\n return value.group(0) + 'th'", "def get_number(x):\n\n return re.findall(r'\\d+', x)[0]", "def ordinal(num):\n if num > 9:\n secondToLastDigit = str(num)[-2]\n if secondToLastDigit == '1':\n return 'th'\n lastDigit = num % 10\n if (lastDigit == 1):\n return 'st'\n elif (lastDigit == 2):\n return 'nd'\n elif (lastDigit == 3):\n return 'rd'\n else:\n return 'th'", "def factString2Dec(string):\n num_out = long(0)\n string_rev = string[::-1]\n for i in range(0, len(string_rev)):\n if ord(string_rev[i]) - 64 > 0:\n digit = ord(string_rev[i]) - 55\n else:\n digit = int(string_rev[i])\n num_out += factorial(i) * digit\n return num_out", "def b36_to_dec(b):\n return int(b, 2)", "def patten2number(sequence):\n try:\n if len(sequence) == 0:\n return 0\n last_base = sequence[-1]\n prefix = sequence[:-1]\n return 4 * patten2number(prefix) + BASE_TO_NUMBER[last_base]\n except KeyError:\n raise ValueError('Not able to convert nucleotide: %s' % last_base)", "def kmer_to_number(kmer):\n nucleotide_to_number, _ = nucleotide_numbering()\n if kmer == '':\n return 0\n last_nucleotide = kmer[-1]\n prefix = kmer[:len(kmer) - 1]\n res = 4 * kmer_to_number(prefix) + nucleotide_to_number[last_nucleotide]\n return res", "def spell_number(num):\n tens, units = num / 10, num % 10\n tens_str = NUMBERS_10[tens]\n units_str = NUMBERS_1[units]\n if tens == 1:\n return NUMBERS_TEEN[units]\n elif tens:\n if units:\n return \"{t} {u}\".format(t=tens_str, u=units_str)\n return \"{t}\".format(t=tens_str)\n else:\n return units_str", "def int2base(x, base):\n digs = string.digits + string.ascii_lowercase\n if x < 0:\n sign = -1\n elif x == 0:\n return '0'\n else:\n sign = 1\n x *= sign\n digits = []\n while x:\n digits.append(digs[x % base])\n x //= base\n if sign < 0:\n digits.append('-')\n digits.reverse()\n return ''.join(digits)", "def ordinal(n):\n ord_dict = {1: \"st\", 2: \"nd\", 3: \"rd\"}\n return str(n + 1) + ord_dict.get((n + 1) if (n + 1) < 20 else (n + 1) % 10, \"th\")", "def base(num,conv,rem=0,baseResult=[]):\r\n if num==0:\r\n 
strResult=''\r\n for i in baseResult[::-1]:\r\n strResult+=str(i)\r\n return int(strResult)\r\n else:\r\n baseResult.append(num%conv)\r\n return base(num//conv,conv,num%conv,baseResult)", "def luhn_algo(num):\n sum = 0\n num_string = str(num) # Converts num into string type\n # Starts with second to last digit\n # iterates by -2 until length of string is reached\n for i in range(-2, -len(num_string) - 1, -2):\n dig_product = int(num_string[i]) * 2\n if dig_product > 9: # If product is 2 digits, sum both individual digits\n sum += dig_product % 10\n sum += dig_product // 10 # int division to get first digit\n else:\n sum += dig_product % 10\n for i in range(-1, -len(num_string) - 1, -2):\n sum += int(num_string[i])\n return sum", "def split(n):\n rest_of_num, last_num = n // 10, n % 10\n return rest_of_num, last_num", "def resolve_naked_numbers(roll):\n if \"*\" in roll:\n first_factor = roll.split(\"*\")[0]\n second_factor = roll.split(\"*\")[1]\n if \"D\" in first_factor:\n roll = str(int(int(first_factor.split(\"D\")[0]) * float(second_factor))) + first_factor.split(\"D\")[1]\n elif \"D\" in second_factor:\n roll = str(int(int(second_factor.split(\"D\")[0]) * float(first_factor))) + second_factor.split(\"D\")[1]\n else:\n roll = str(int(float(first_factor)) * int(float(second_factor)))\n if \"/\" in roll:\n dividend = roll.split(\"/\")[0]\n divisor = roll.split(\"/\")[1]\n roll = str(int(float(dividend)/float(divisor)))\n return roll", "def getMantisse(number):\n mantisse = number / np.power(10, np.floor(np.log10(number)))\n return(mantisse)", "def numeral(number):\n return ROMAN_NUMERALS[number]", "def get_rand_senary(ndigits, base=0):\n # Algorithm from https://stackoverflow.com/questions/137783/expand-a-random-range-from-1-5-to-1-7/891304#891304\n senary_digits = []\n state = 0\n pow1 = 1\n pow2 = 6\n while len(senary_digits) < ndigits:\n if state // pow1 == (state + pow2) // pow1:\n result = state // pow1\n state = (state - result * pow1) * 6\n pow2 *= 6\n senary_digits.append(result+base)\n else:\n state = 256 * state + pow2 * ord(os.urandom(1))\n pow1 *= 256\n # Keep the size of the huge numbers under a googol so it doesn't slow to a crawl.\n if pow1 > 10e100 or pow2 > 10e100:\n pow1 = 1\n pow2 = 6\n state = 0\n return ''.join(map(str, senary_digits))", "def base_converter(decimal_number, base):\n digits = \"0123456789ABCDEF\"\n quotient_stack = Stack()\n reminder = decimal_number\n while reminder > 0:\n quotient = reminder % base\n quotient_stack.push(quotient)\n reminder = reminder // base\n\n new_string = \"\"\n while not quotient_stack.is_empty():\n new_string = new_string + digits[quotient_stack.pop()]\n return new_string", "def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s", "def sumDigit():" ]
[ "0.7141755", "0.68063426", "0.6753309", "0.6747062", "0.6726129", "0.6608696", "0.6578584", "0.6549075", "0.6525275", "0.6459882", "0.64534855", "0.64322025", "0.64104617", "0.6395445", "0.63594806", "0.63561016", "0.6331798", "0.6282339", "0.62483346", "0.6241493", "0.62405914", "0.6229513", "0.62102264", "0.61884254", "0.61616105", "0.6159436", "0.61593", "0.6159249", "0.615459", "0.61225903", "0.60870355", "0.60760003", "0.6058877", "0.60496175", "0.6048391", "0.6043312", "0.6016738", "0.6009186", "0.59885746", "0.5988523", "0.59642315", "0.5948813", "0.5934581", "0.59195864", "0.59156984", "0.589325", "0.5888338", "0.58833474", "0.58737874", "0.5850879", "0.58504224", "0.5833724", "0.58152694", "0.5811175", "0.58014375", "0.57829124", "0.5777648", "0.577128", "0.57660925", "0.57608277", "0.5759435", "0.57587266", "0.574852", "0.57439095", "0.57343", "0.57220536", "0.57200676", "0.5716647", "0.57154393", "0.5714267", "0.5714176", "0.5698399", "0.56950706", "0.56868076", "0.56798863", "0.5676389", "0.56672966", "0.56619024", "0.56566274", "0.5654898", "0.5651811", "0.5650906", "0.5645723", "0.5644753", "0.5643255", "0.5640408", "0.5635695", "0.563036", "0.56293285", "0.5613441", "0.56106585", "0.5608929", "0.56023985", "0.5602198", "0.5601798", "0.5601007", "0.5597954", "0.55938816", "0.559308", "0.55859214", "0.5580441" ]
0.0
-1
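The `ordinal_filter` document in the row above can be checked directly against the examples given in the query (62 → "62nd", 63 → "63rd"). A minimal standalone sketch — renamed `ordinal` here, and using `==` in place of the original's identity (`is`) comparisons, which only work for small ints by CPython implementation detail — is:

def ordinal(value: int) -> str:
    # Mirrors the ordinal_filter logic above, with == instead of `is`.
    digit = value % 10
    if 10 < value < 20:
        suffix = 'th'
    elif digit == 1:
        suffix = 'st'
    elif digit == 2:
        suffix = 'nd'
    elif digit == 3:
        suffix = 'rd'
    else:
        suffix = 'th'
    return '%d%s' % (value, suffix)


assert ordinal(62) == '62nd'
assert ordinal(63) == '63rd'
assert ordinal(11) == '11th'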
Class constructor receives parameters to connect to the networkAPI.
def __init__(self, networkapi_url, user, password, user_ldap=None):
    super(RoteiroEquipamento, self).__init__(
        networkapi_url, user, password, user_ldap)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, address):\r\n self.loop = asyncio.new_event_loop()\r\n self.device_address = address\r\n\r\n self.__api_address = \"http://\" + address if not address.startswith('http://') else address\r\n _LOGGER.debug(\"Api address: %s\", self.__api_address)\r\n self.model_raw = None\r\n self.model = None\r\n self.working_mode_raw = None\r\n self.working_mode = None\r\n self.host_name = None\r\n self.main_status = None\r\n\r\n self.wifi_sta = None\r\n self.system = None\r\n self.cloud = None\r\n self.mqtt = None\r\n self.firmware = None\r\n self.relays = None\r\n self.rollers = None", "def __init__(self, address=\"lex\", port=8000, **kwargs):\n self.connect(address, port)", "def __init__(self, *args, **kwargs):\n self.driver = importutils.import_module(CONF.network_driver)\n\n self.q_conn = quantum_connection.QuantumClientConnection()\n self.m_conn = melange_connection.MelangeConnection()\n self.a_conn = aiclib_connection.AICLibConnection()\n\n # NOTE(tr3buchet): map for global uuids\n # if these should change, restart this service\n # self._nw_map will look like:\n # self._nw_map = {'0000000000-0000-0000-0000-000000000000': pub_uuid,\n # '1111111111-1111-1111-1111-111111111111': priv_uuid,\n # pub_uuid: '0000000000-0000-0000-0000-000000000000',\n # priv_uuid: '1111111111-1111-1111-1111-111111111111'}\n # there will be only one (each way) entry per label\n self._nw_map = {}\n self._rackconnect_servicenet = None\n\n if CONF.network_global_uuid_label_map:\n self._nw_map = self._get_nw_map()\n LOG.debug('the self._nw_map is |%s|' % self._nw_map)\n else:\n self._nw_map = {}\n\n self._rackconnect_roles = set(CONF.rackconnect_roles)\n rc_public_gateway_roles = CONF.rackconnect_public_gateway_roles\n self._rc_public_gateway_roles = set(rc_public_gateway_roles)\n\n super(QuantumManager, self).__init__(service_name='network',\n *args, **kwargs)", "def __init__(self,ip,user,pwd):\r\n self.user = user\r\n self.pwd = pwd\r\n self.ip = ip\r\n self.api_endpoint = 'https://%s/api/json/v2/types/' % self.ip\r\n\r\n self.clusters = self._get_objects(\"clusters\")\r\n self.xms = self._get_objects(\"xms\")", "def __init__(self,address = None):\n\t\t# I really should do some validation around here\n\t\n\t\tif address != None:\n\t\t\tself.connect(address)", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n config = kwargs.get(\"config\", kwargs)\n self.connection_type = config.get(\"connection_type\", None)\n self.connection = connection_decider.connection(device=self,\n conn_type=self.connection_type,\n **kwargs)\n self.connection.connect()\n self.consoles = [self]\n super(PrplMeshStation, self).__init__(*args, **kwargs)\n self.iface_dut = self.iface_wifi = self.kwargs.get(\n 'iface', 'wlan0')\n self.driver_name = config.get(\"driver\", \"nl80211,wext\")\n self.mac = self.get_mac()\n\n # kill all wpa_supplicant relevant to active interface\n self.wifi_disconnect()\n # Turn on and off wlan iface just in case\n self.disable_and_enable_wifi()", "def __init__(self, IP=None, Port=None):\n\n self.validate_input(IP, Port)\n self.url = ('http://{}:{}/v2/'.format(self.IP, self.Port))\n self.get_templates()\n self.get_data()\n self.telnet_threads = []", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self, host, port, initialized=None, uuid=None, debug=False, no_mine=False, benchmark=False, 
neighbors=[]):\n\n m = sha1()\n m.update(host.encode())\n m.update(str(port).encode())\n\n self.metadata = {}\n self.metadata['done'] = initialized\n self.metadata['host'] = host\n self.metadata['port'] = port\n self.metadata['uuid'] = str(m.hexdigest()) if uuid is None else uuid\n self.metadata['debug'] = debug\n self.metadata['no_mine'] = no_mine\n self.metadata['benchmark'] = benchmark\n self.metadata['resolve_requests'] = set()\n self.metadata['resolve_lock'] = Lock()\n\n if benchmark:\n from threading import Semaphore\n self.metadata['benchmark_lock'] = Semaphore(0)\n\n if self.metadata['uuid'] == 'SYSTEM':\n raise InvalidID\n\n initialize_log(self.metadata['uuid'], debug)\n\n # Create the Blockchain object.\n self.metadata['blockchain'] = Blockchain()\n self.metadata['history'] = History(self.metadata['uuid'])\n\n # Create the Network Handler object.\n self.nh = NetworkHandler(self.metadata, neighbors)\n\n # Start the Network Handler main loop.\n self.nh.event_loop()", "def __init__ (self, key = None, password = None):\n self.KEY = key or 'testapi'\n self.PASSWORD = password or 'testpass'\n self.URL = 'https://api.internet.bs/'\n # assume we use test credential if none were provided\n if not key or not password:\n self.URL = 'https://testapi.internet.bs'", "def __init__(self, host=\"127.0.0.1\", port=8888, ae_title=\"PYMEDPHYSCONNECT\"):\n\n self.host = host\n self.port = port\n self.ae_title = ae_title\n\n logging.debug(\n \"DicomConnect host: %s, port: %d, AE Title: %s\",\n self.host,\n self.port,\n self.ae_title,\n )", "def __init__(self, ip=\"192.168.10.1\", port=23, community=\"private\", type=\"int\"):\n self.ip = ip\n self.port = port\n self.community = community\n self.type=type\n logger.info(\"port is %r\" % port)\n logger.info(\"Connected to instrument at ip %s, port %d\" % (ip, port))", "def __init__(self, host='192.168.45.45', username='admin', password='Admin123', autodeploy=True):\n logging.debug(\"In the FMC __init__() class method.\")\n\n self.host = host\n self.username = username\n self.password = password\n self.autodeploy = autodeploy", "def __init__(self, hostname, username, password, timeout, optional_args):\n raise NotImplementedError", "def __init__(self):\n\n self.host=\"localhost\"\n \"\"\"\n :annotation = \"localhost\":\n defaults to \"localhost\". 
At this time MumbleClient is ipv4 only\n \"\"\"\n\n self.port=64738\n self.nickname=\"MumblePythonBot\"\n self.SSLOptions=CertificateOptions()\n self.password=None", "def initialise_network(self):\n raise NotImplementedError", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port, driver, connection_string, password=\"\", use_ssl=False):\n self._socket = socket.create_connection((host, port))\n if use_ssl:\n self._socket = ssl.wrap_socket(self._socket)\n\n self._pass = password\n self._driver = driver\n self._connection_string = connection_string\n self._id = 1", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def __init__(self, host_url=Constants.DEFAULT_LOCAL_HOST,\n username=Constants.DEFAULT_USERNAME,\n password=Constants.DEFAULT_PASSWORD,\n is_https=Constants.DEFAULT_HTTPS,\n verify_https=Constants.DEFAULT_VERIFY_HTTPS):\n self.router = OpenWrtLuciRPC(host_url, username, password,\n is_https, verify_https)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # set by :method:`connect`\n self.bootstrap_server = self.topic = self.start_params = self.end_params = None\n self.start_p_offsets = self.end_p_offsets = None\n self.available_topics = None\n self.client = None\n\n # publicly readable\n self.stats = Pinnate({\"added\": 0})\n\n # used during read\n self.approx_position = None\n self.items_to_fetch = None", "def __init__(self, config):\n\n\t\tself.config = config\n\n\t\t\"\"\" Valid socket methods \"\"\"\n\t\tself.methods = {\n\t\t\t\"update\": self.update,\n\t\t\t\"get\": self.get,\n\t\t\t\"download\": self.download,\n\t\t\t\"heartbeat\": self.heartbeat,\n\t\t\t\"goodbye\": self.goodbye\n\t\t}\n\n\t\t\"\"\" Directories that MUST be created to function properly \"\"\"\n\t\tself.required_directories = {\n\t\t\tself.config[\"daemon\"][\"rootdir\"] + \"/packages\"\n\t\t}\n\n\t\t\"\"\" Create directories, if they don't exist \"\"\"\n\t\tfor d in self.required_directories:\n\t\t\tif not os.path.exists(d):\n\t\t\t\tos.makedirs(d)\n\n\t\t\"\"\" Initialize libtorrent session \"\"\"\n\t\tself.ses = lt.session()\n\t\ttry:\n\t\t\tself.ses.listen_on(int(self.config[\"libtorrent\"][\"listen_port_lower\"]), int(self.config[\"libtorrent\"][\"listen_port_upper\"]))\n\t\texcept:\n\t\t\tprint(traceback.format_exc())\n\t\t\tsys.exit(1)\n\n\t\tself.ses.start_dht()\n\t\tself.ses.start_upnp()\n\n\t\t\"\"\" \n\t\tDownload and update package list\n\t\tTODO: Check to ensure this logic is correct\n\t\t\"\"\"\n\t\tself.master = BTEdb.Database(self.config[\"daemon\"][\"rootdir\"] + \"/package-index.json\")\n\t\tself.update_list()", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def __init__(self, host, port):\n self._host = host\n self._port = port", "def __init__(self, url, username, password):\n\n # NOTE(kmestery) The 'limit' paramater is intended to limit how much\n # data is returned from ODL. This is not implemented in the Hydrogen\n # release of OpenDaylight, but will be implemented in the Helium\n # timeframe. 
Hydrogen will silently ignore this value.\n self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1'\n self.username = username\n self.password = password\n self.auth_cookies = None\n self.last_request = None\n self.expired = None\n self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60\n self.session_deadline = 0", "def __init__(self):\n self.host = None\n self.port = None\n self.topic = None\n self._is_opened = False\n self.debug = 0\n self.qos = 0\n self.mqttc = mqtt.Client(\"sng_mqtt\")", "def __init__(self, config):\n\n self.umodes = \"+iko\"\n\n self.config = config\n\n ServerConn.__init__(self, self.config[\"host\"], self.config[\"port\"])\n\n self.last_uid = 0", "def __init__(self, address: str, port: int) -> None:\n super().__init__()\n self.address = address\n self.port = port", "def __init__(self, host='localhost', port=9091, path='/transmission/rpc',\n username=None, password=None, ssl=False, timeout=DEFAULT_TIMEOUT):\n if ssl:\n scheme = 'https'\n else:\n scheme = 'http'\n\n self.url = \"%s://%s:%d%s\" % (scheme, host, port, path)\n self.headers = {} # type: Dict[str, str]\n self.tag = 0\n self.timeout = timeout\n\n self.auth = None # type: Tuple[str, str]\n if username or password:\n self.auth = (username, password)", "def __init__(self, loop, websession, host, port, api_key, **kwargs):\n self.groups = {}\n self.lights = {}\n self.scenes = {}\n self.sensors = {}\n self.config = None\n self.loop = loop\n self.session = websession\n self.host = host\n self.api_url = 'http://{}:{}/api/{}'.format(host, port, api_key)\n if 'legacy_websocket' in kwargs:\n from .websocket import WSClient as ws_client\n else:\n from .websocket import AIOWSClient as ws_client\n self.ws_client = ws_client\n self.websocket = None\n self.async_add_device_callback = kwargs.get('async_add_device')\n self.async_connection_status_callback = kwargs.get('connection_status')", "def __init__(self):\n self.host = CONF.zvm.zvm_xcat_server\n self.port = 443\n self.conn = HTTPSClientAuthConnection(self.host, self.port,\n CONF.zvm.zvm_xcat_ca_file,\n timeout=CONF.zvm.zvm_xcat_connection_timeout)", "def __init__(self, network: Network):\n self.graph = network.graph", "def __init__(self):\n self._server = None\n self._address = \"\"\n self._port = 0", "def __init__(self, username=None, password=None):\n self._username = username\n self._password = password\n self._suds_client = None", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def __init__(self, host, port):\n self._host = host\n self._port = port\n self._data = None", "def __init__(self, ip_address=\"127.0.0.1\", port=8777, network_timeout=120):\n self._tcp_client = TCPClient(ip_address, port, network_timeout)\n self._tcp_client.connect()\n self._tcp_lock = threading.Lock()", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def __init__(self,\n host,\n username,\n password,\n port=443,\n path='/wsman',\n protocol='https'):\n super(DRACClient, self).__init__(host,\n username,\n password,\n port,\n path,\n protocol)\n self._job_mgmt = job.JobManagement(self.client)\n self._idrac_cfg = idrac_card.iDRACCardConfiguration(self.client)\n self._nic_cfg = nic.NICConfiguration(self.client)\n self._nic_mgmt = nic.NICManagement(self.client)", "def __init__(self, connectionParams) :\n self.ssh = None\n self.connected = False\n self.connObj = connectionParams", "def __init__(self):\n self.id = None\n \"\"\"\"true if individual services can be enabled/disabled\"\"\"\n 
self.canenableindividualservice = None\n \"\"\"\"the destination physical network\"\"\"\n self.destinationphysicalnetworkid = None\n \"\"\"\"the provider name\"\"\"\n self.name = None\n \"\"\"\"the physical network this belongs to\"\"\"\n self.physicalnetworkid = None\n \"\"\"\"services for this provider\"\"\"\n self.servicelist = None\n \"\"\"\"state of the network provider\"\"\"\n self.state = None", "def __init__(self, username = None, password = None):\n self.username = config['AUTH']['USERNAME']\n self.password = config['AUTH']['PASSWORD']\n self.login = config['URL']['LOGIN']\n self.nav_url = config['URL']['NAV']\n self.tag_url = config['URL']['TAGS']\n self.direct_url = config['URL']['DM']\n self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER'])\n self.stay_logged = False\n self.api = InstagramAPI(self.username, self.password)", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def __init__(\n self, \n config: defincashier_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def __init__(self, host):\n self.host = host", "def __init__(self, host):\n self.host = host", "def __init__(self, api_key=\"\"):\n self.logger = logging.getLogger(__name__)\n self.host_url = 'https://community-api.coinmetrics.io/v2/'\n self.headers = {\"api_key\": api_key} if api_key != '' else {}", "def __init__(\n self, \n config: baasdatagw_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = 
UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def __init__(self, host, server_port):\n\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n # TODO: Finish init process with necessary code\n self.host = host\n self.server_port = server_port\n self.run()", "def __init__(self):\n self.try_to_connect()", "def __init__(self):\n self.host = socket.gethostname() # 192.168.56.1\n self.port = 33000\n self.buffer_size = 1024\n self.address = (self.host, self.port)", "def __init__(self, hostname: str, port: int):\n # Create a dictionary of topics and callbacks\n self.callback_dict = dict()\n\n self.client = mqtt.Client(userdata=self.callback_dict)\n self.client.on_message = _on_message_handler\n self.client.connect(hostname, port, 60)", "def __init__(self, host, user, password, port=22):\n self.host = host\n self.user = user\n self.port = port\n self.password = password", "def __init__(self, client, network_id):\n super(NetworksMixin, self).__init__(client)\n self._network_id = network_id", "def __init__(self, server_addr, server_port):", "def __init__(self, server, username, password):\n self.server = server\n self.username = username\n self.password = password\n self.connection = None\n self.session = None", "def __init__(\n self,\n connection_retries: int = 5,\n ):\n self.sta_if = network.WLAN(network.STA_IF)\n self.ap = network.WLAN(network.AP_IF)\n self.mode = None\n self.num_retries = connection_retries\n self.cfg = ConfigRepository()\n\n with open('wifi_config.json', 'r') as f:\n wifi_dict = ujson.load(f)\n self.ssid = wifi_dict['ssid']\n self.password = wifi_dict['key']\n\n self.do_connect()", "def __init__(self, *args, **kwargs):\n super(Client, self).__init__(role='c', *args, **kwargs)\n\n # Internal variables\n self._bulksize = None\n self._server_hostname = None\n self._port = None\n self._num_streams = None\n self._zerocopy = False", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def __init__(self, url, username, password, timeout=10, verify_ssl=True):\n self._base_url = url + '/webapi/'\n self._username = username\n self._password = password\n self._timeout = timeout\n self._verify_ssl = verify_ssl\n self._api_info = None\n self._sid = None\n\n self._initialize_api_info()\n self._initialize_api_sid()", "def __init__(self):\n\n # For now, we'll connect to the target via the Apollo debug controller.\n # This should be replaced by a high-speed USB link soon; but for now\n # we'll use the slow debug connection.\n self._debugger = ApolloDebugger()\n self._serial = self._find_serial_connection()", "def __init__(self):\n\n self.argument_spec = netapp_utils.na_ontap_host_argument_spec()\n self.argument_spec.update(dict(\n state=dict(required=False, choices=['present', 'absent'], default='present'),\n vserver=dict(required=True, type='str'),\n name=dict(required=True, type='str'),\n owner=dict(required=False, type='str'),\n group=dict(required=False, type='str'),\n control_flags_raw=dict(required=False, type='int'),\n ))\n\n self.module = AnsibleModule(\n argument_spec=self.argument_spec,\n supports_check_mode=True,\n )\n\n # set up variables\n self.na_helper = NetAppModule()\n self.parameters = self.na_helper.set_parameters(self.module.params)\n\n if HAS_NETAPP_LIB is False:\n 
self.module.fail_json(msg='The python NetApp-Lib module is required')\n else:\n self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])", "def __init__(self, key=None, url=\"https://api.dreamhost.com\"):\n if not key:\n key = os.getenv('API_KEY')\n\n self._key = key\n self._url = url\n self._last_command = None\n self._connected = False\n self._available_commands = []\n\n if key:\n self.connect()", "def __init__(self, endpoint='https://www.wikidata.org/w/api.php'):\n self.endpoint = endpoint", "def __init__(\n self, \n config: shuziwuliu_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def __init__( self, **params ):\n \n host = custom( CPULimitedHost, cpu=cpuShare() ) \n link = custom( TCLink, bw=args.bandwidth, delay=delay() )\n \n Mininet.__init__(\n self,\n topo=BarrierTransactionTopo( **params ),\n host=host,\n link=link )", "def __init__(self, proj,LocalIP,ListenerPort,BroadcasterIP,BroadcasterPort):\n\n\n # Get connection settings from robot configuration file\n ipIn = LocalIP # IP address (string)\n portIn = ListenerPort # Port (number)\n ipOut = BroadcasterIP # IP address (string)\n portOut = BroadcasterPort # Port (number)\n try:\n # Create proxies to access modules\n self.robocomm = _RobotCommunicator()\n self.robocomm.start()\n time.sleep(1) # Give communicator time to start and receive first data\n except RuntimeError:\n print \"(INIT) ERROR: Cannot connect to the robot.\"\n exit(-1)", "def __init__(self, *args, **kwargs):\n self.logger = logging.getLogger(__file__)\n kwargs.setdefault('api_major', 2)\n kwargs.setdefault('api_minor', 0)\n baseclient_2_0.DataONEBaseClient_2_0.__init__(self, *args, **kwargs)\n mnclient_1_1.MemberNodeClient_1_1.__init__(self, *args, **kwargs)", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()", "def __init__(self, username=None, password=None, terminal_id=None):\n if username is None:\n raise Exception('Username is empty')\n if password is None:\n raise Exception('Password is empty')\n if terminal_id is None:\n raise Exception('Terminal ID is empty')\n if not isinstance(terminal_id, int):\n raise Exception('Invalid terminal_id type. 
int type expected!')\n self.userName = username\n self.userPassword = password\n self.terminalId = terminal_id\n self.service_address = 'https://bpm.shaparak.ir/pgwchannel/services/pgw?wsdl'\n self.payment_address = 'https://bpm.shaparak.ir/pgwchannel/startpay.mellat'\n self.namespace = 'http://interfaces.core.sw.bps.com/'", "def __init__(self, endpoint, playerport1, playerport2, controlport):\n self.__endpoint = endpoint\n self.__player1 = playerport1\n self.__player2 = playerport2\n self.__control = controlport", "def __init__(\n self,\n host: str,\n port: int,\n alias: str | None,\n username: str | None,\n password: str | None,\n ) -> None:\n\n self._host = host\n self._alias = alias\n\n # Establish client with persistent=False to open/close connection on\n # each update call. This is more reliable with async.\n self._client = PyNUTClient(self._host, port, username, password, 5, False)\n self.ups_list: dict[str, str] | None = None\n self._status: dict[str, str] | None = None\n self._device_info: NUTDeviceInfo | None = None", "def __init__(self, client, userID, robotID):\r\n client.registerConnection(self)\r\n self._client = client\r\n self._userID = userID\r\n self._robotID = robotID\r\n self._avatar = None\r\n self._view = None\r\n self._namespace = None\r\n self._protocol = None", "def __init__(self, api_key=None, api_secret=None):\n\n self.session = requests.session()\n headers = {'Accept': 'application/json', 'User-Agent': 'binance/python'}\n\n if api_key is not None and api_secret is not None:\n self.set_api_key(api_key, api_secret)\n headers['X-MBX-APIKEY'] = self.API_KEY\n\n self.session.headers.update(headers)\n\n # init DNS and SSL cert\n self.ping()", "def __init__(\n self, \n config: demo_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def __init__(self, protocol):\r\n self._protocol = protocol", "def __init__(self,\r\n name=None,\r\n lan_ip=None,\r\n uplink=None,\r\n public_port=None,\r\n local_port=None,\r\n allowed_ips=None,\r\n protocol=None):\r\n\r\n # Initialize members of the class\r\n self.name = name\r\n self.lan_ip = lan_ip\r\n self.uplink = uplink\r\n self.public_port = public_port\r\n self.local_port = local_port\r\n self.allowed_ips = allowed_ips\r\n self.protocol = protocol", "def __init__(self,privateToken=None):\n self._url = 'http://www.zillow.com/webservice'\n self._privateToken = 'X1-ZWz1azdtprntor_8xo7s'\n 
self._connection()", "def __init__(self):\n self._host = None\n self._port = None\n self._servers = []", "def __init__(self, address, parent=None):\n super(CooperationServer, self).__init__(parent)\n \n self.__address = address", "def _build_network(self):\n pass", "def __init__(self, url=default_url, config_file=default_config, proxy_url=None):\n\n self.url = url\n self.config = None\n self.token = None\n self.proxy = None if proxy_url is None else dict(http='socks5h://{:s}'.format(proxy_url),https='socks5h://{:s}'.format(proxy_url))\n self.auth(config_file)\n self.get_providers()\n self.default_provider = None\n self.tables = None", "def __init__(self, ip, name, port = 8189, timeout = 1, retries = 5):\n self._name = name\n self._ip = ip\n self._port = port\n self._timeout = timeout\n self._retries = retries\n self._state = WifiLedShopLightState()\n self._sock = None\n self._unique_id = self.send_command(Command.GET_ID, []).decode('utf-8')\n self.update()", "def __init__(self, netdis):\n self._netdis = netdis", "def __init__(self, ip, port, header):\n \n self.header = header\n self.ip = ip\n self.port = port\n try:\n self._connect_socket()\n except socket.error as e:\n print(e)\n self.close_and_exit()", "def __init__(\n self, \n config: bot_models.Config,\n ):\n if UtilClient.is_unset(config):\n raise TeaException({\n 'code': 'ParameterMissing',\n 'message': \"'config' can not be unset\"\n })\n self._access_key_id = config.access_key_id\n self._access_key_secret = config.access_key_secret\n self._security_token = config.security_token\n self._endpoint = config.endpoint\n self._protocol = config.protocol\n self._user_agent = config.user_agent\n self._read_timeout = UtilClient.default_number(config.read_timeout, 20000)\n self._connect_timeout = UtilClient.default_number(config.connect_timeout, 20000)\n self._http_proxy = config.http_proxy\n self._https_proxy = config.https_proxy\n self._no_proxy = config.no_proxy\n self._socks_5proxy = config.socks_5proxy\n self._socks_5net_work = config.socks_5net_work\n self._max_idle_conns = UtilClient.default_number(config.max_idle_conns, 60000)\n self._max_idle_time_millis = UtilClient.default_number(config.max_idle_time_millis, 5)\n self._keep_alive_duration_millis = UtilClient.default_number(config.keep_alive_duration_millis, 5000)\n self._max_requests = UtilClient.default_number(config.max_requests, 100)\n self._max_requests_per_host = UtilClient.default_number(config.max_requests_per_host, 100)", "def __init__(self, hostname, username, password, port=5392):\n\n connection_hash = str(uuid.uuid3(uuid.NAMESPACE_OID, f'{hostname}{port}{username}{password}'))\n\n self.hostname = hostname\n self.port = port\n\n self.__auth = {\n 'data': {\n 'username': username,\n 'password': password,\n 'app_name': 'NimOS REST Client'\n }\n }\n\n self.__connection_hash = connection_hash\n\n if connection_hash in SessionManager._SESSIONS:\n self.session_id, self.session_token = SessionManager._SESSIONS[connection_hash]\n self.connected = True\n self._headers = {'X-Auth-Token': str(self.session_token)}\n\n else:\n self._headers = {}\n self.session_token = None\n self.session_id = None\n self.connected = self._connect()", "def __init__(self, host=\"localhost\", port=60151, verbose=False):\n super(IGVSocketRobot, self).__init__(verbose=verbose)\n\n self.host = host\n self.port = port", "def __init__(self, host=u'', username=u'', password=u'', secret=u'', port=22, device_type=u'', known_hosts=None,\n local_addr=None, client_keys=None, passphrase=None, loop=None):\n 
super().__init__(host=host, username=username, password=password, secret=secret, port=port,\n device_type=device_type, known_hosts=known_hosts, local_addr=local_addr,\n client_keys=client_keys, passphrase=passphrase, loop=loop)\n\n self._current_context = 'system'\n self._multiple_mode = False", "def __init__(self):\n self.network = Network()\n self.home_dir = os.path.expanduser('~')", "def __init__(self, **kwargs):\n self.local = salt.client.LocalClient()\n self.minion_nodes = self._query()", "def __init__(\n self,\n api_url: str,\n username: str,\n password: str,\n shared_disk: str,\n ip_resolver: IPResolver,\n verify_cert: bool = False,\n create_pool: Optional[str] = None):\n self.logger = logging.getLogger(__name__)\n self.api_url = api_url\n self.username = username\n self.password = password\n self.sess = BaseUrlSession(self.api_url + '/api2/json/')\n self.sess.verify = verify_cert\n if not self.sess.verify:\n requests.packages.urllib3.disable_warnings()\n\n self.shared_disk = shared_disk\n self.create_pool = create_pool\n self.ip_resolver = ip_resolver\n self._login()", "def __init__(self):\n self.client_id = None\n self.bridge_config = {}\n self.bridge_config_answer_status = None", "def __init__(self,\n all_endpoints_reachable=None,\n auto_register_target=None,\n auto_registration=None,\n bandwidth_limit=None,\n cluster_id=None,\n cluster_incarnation_id=None,\n compression_enabled=None,\n description=None,\n encryption_key=None,\n name=None,\n network_interface=None,\n password=None,\n purpose_remote_access=None,\n purpose_replication=None,\n remote_access_credentials=None,\n remote_ips=None,\n remote_iris_ports=None,\n reverse_registed=None,\n user_name=None,\n validate_only=None,\n view_box_pair_info=None,\n ):\n\n # Initialize members of the class\n self.all_endpoints_reachable = all_endpoints_reachable\n self.auto_register_target = auto_register_target\n self.auto_registration = auto_registration\n self.bandwidth_limit = bandwidth_limit\n self.cluster_id = cluster_id\n self.cluster_incarnation_id = cluster_incarnation_id\n self.compression_enabled = compression_enabled\n self.description = description\n self.encryption_key = encryption_key\n self.name = name\n self.network_interface = network_interface\n self.password = password\n self.purpose_remote_access = purpose_remote_access\n self.purpose_replication = purpose_replication\n self.remote_access_credentials = remote_access_credentials\n self.remote_ips = remote_ips\n self.remote_iris_ports = remote_iris_ports\n self.reverse_registed = reverse_registed\n self.user_name = user_name\n self.validate_only = validate_only\n self.view_box_pair_info = view_box_pair_info", "def __init__(self, host, server_port):\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.run(host, server_port)\n\n # TODO: Finish init process with necessary code\n #Vegard sier vi ikke skal skrive noe her", "def __init__(self, uuid, auth_url, project_name, username, password):\n self.uuid = uuid\n self.auth_url = auth_url\n self.project_name = project_name\n self.username = username\n self.password = password\n \n conn = pyone.OneServer(\n self.auth_url,\n session=\"{0}:{1}\".format(username, password)\n )", "def __init__(self):\n\n # labjack connection handle (default: None. 
If connected: labjack handler instance)\n self.connection_handle = None\n\n # labjack connection state (default: None, connection_error: False, connected: True)\n self.connection_state = False\n\n # try to connect\n self.connect()" ]
[ "0.6986114", "0.6901052", "0.6891296", "0.6836612", "0.6833463", "0.6784958", "0.67357445", "0.671517", "0.66976726", "0.66885394", "0.66739595", "0.66732055", "0.6663089", "0.6645663", "0.66445404", "0.6629692", "0.66234744", "0.662254", "0.66040313", "0.66040313", "0.6572824", "0.65646255", "0.6560609", "0.65571254", "0.65459025", "0.6542644", "0.6539374", "0.6527865", "0.65243083", "0.6524155", "0.65219826", "0.65153426", "0.64954585", "0.6490718", "0.6488532", "0.6479525", "0.647515", "0.6474842", "0.6468547", "0.6468445", "0.646052", "0.6457913", "0.64565283", "0.6447865", "0.6438143", "0.64324516", "0.64300746", "0.6427897", "0.641577", "0.641577", "0.6410821", "0.6407983", "0.64057183", "0.6404464", "0.640338", "0.64008474", "0.63962674", "0.6394496", "0.6392179", "0.6379096", "0.637897", "0.6377706", "0.6373952", "0.63719684", "0.6371788", "0.63707346", "0.6366392", "0.6365643", "0.6363512", "0.6360496", "0.63522756", "0.63522285", "0.6351603", "0.6347421", "0.6338429", "0.63370854", "0.63340926", "0.63331836", "0.63291454", "0.63290125", "0.6328796", "0.63284564", "0.63278824", "0.6327576", "0.63241655", "0.6323252", "0.6321205", "0.6318356", "0.6317068", "0.63144195", "0.6310955", "0.63095635", "0.630861", "0.63085157", "0.6307759", "0.6306016", "0.62878", "0.6284998", "0.62827015", "0.62821954", "0.62813956" ]
0.0
-1
Sets the initial state of the device.
def _initialize_data(self):
        self.reset_count = 0
        self._idn_no_firmware = "KEPCO,BOP 50-20,E1234,"
        self._firmware = 2.6
        self._init_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_state(self, initial_state: QuantumCircuit) -> None:\n self._invalidate()\n self._initial_state = initial_state", "def initialize(self):\n self.currState = self.startState", "def resetDeviceStates(self):", "def set_initial_state(self, initial_states, userdata):\n raise NotImplementedError()", "def set_initial(self, boolean):\n self.initial = boolean", "def set_working_state(self):\n self.state = 0\n self.port = None", "def init_state(self) -> None:\n self.state = np.zeros(self.shape, dtype=int)", "def set_initial_machine_state(self, machine_state):\n\t\tself.machine_state = machine_state", "def set_state( self ):", "def doInitializeDevice(self):\n super().doInitializeDevice()", "def set_state(self, state):\n self.state = state\n self.config(fill=self.state)", "def test_device_init_command(self):\n default_val = 0\n self.assertEqual(self.sim_device.integer1, default_val)\n # Write to the attribute integer1\n self.sim_device.integer1 = 45\n self.assertEqual(self.sim_device.integer1, 45)\n # Reset the values of the device attributes to default.\n self.sim_device.Init()\n # Check that the desiredPointing attribute is reset.\n self.assertEqual(self.sim_device.integer1, default_val)", "def __setstate__(self, dict):\n\n\t\tself.__dict__ = dict\n\n\t\t# Set missing values to defaults.\n\t\tself._device = None\n\t\tself.resources = {}", "def make_initial_state(self):\n pass", "def initialise(self):\n self.device.initialise()\n return \"OK\"", "def set_state(self, state=0):\r\n return self._arm.set_state(state=state)", "def reset(self):\n self.state.fill(EMPTY)", "def reset(self):\n self._set_init()", "async def init_device(self):\n await Device.init_device(self)\n # PROTECTED REGION ID(AsyncTabata.init_device) ENABLED START #\n self.logger = logging.getLogger(__name__)\n self._lock = threading.Lock()\n self._dev_factory = DevFactory()\n self._prepare = 10\n self._work = 20\n self._rest = 10\n self._cycles = 8\n self._tabatas = 1\n self._running_state = RunningState.PREPARE\n self.subscribed = False\n self.set_state(DevState.OFF)\n # The below commented commands are not really needed\n # since in GreenMode.Asyncio mode the monitor\n # lock is disabled by default.\n # util = tango.Util.instance()\n # util.set_serial_model(tango.SerialModel.NO_SYNC)\n # PROTECTED REGION END # // AsyncTabata.init_device", "def init_state(self):\n self.read_inputs()\n if (self.in_power.value == 1) and (self.in_alert.value == 1):\n self.state = 'alert'\n elif (self.in_power.value == 1):\n self.state = 'on'\n else:\n self.state = 'off'\n self.leave_init()", "def set_device_state(self, nDeviceState):\n\t\tcall_sdk_function('PrlSrvCfgDev_SetDeviceState', self.handle, nDeviceState)", "def _reset_state(self):\n\n self.state = None\n self.use_count = 0\n\n # Guards both state and use_count\n self.cond = threading.Condition()\n\n # Incremented each time we initialise a new mount state. 
Aids\n # debugging.\n self.generation = 0", "def set_state(self,state):\n self.__state = state", "def set_state(self, state: int):", "def set_state(self, state):\n self.state = state", "def setState(self, state):\n self.state = state", "def initialize_state(self, state):\n print 'state initialized'\n return state", "def init(self):\n self.connect_to_switches()\n self.reset_states()", "def _reset_state(self):\n self.state = self.start_state.copy()", "def set_state(self, state: int):\n self.state = state", "def update(self):\r\n self._state = self._dev.state", "def reset(self):\n self.__init__()", "def reset(self):\n self.__init__()", "def reset(self):\n self.__init__()", "def reset(self):\n self.set_state(self._initial_state)", "def on_start(self):\n self._state = service.ServiceStateMachine(['READY'], default_state='READY')\n self._temperature = 50\n self._set_state_internal(force=True)", "def handle_reset(self):\n self.initialise()", "def _set_state(self, state):\n #print(\"** set state from %d to %d\" % (self.state, state))\n self.state = state", "def setup_device(device):\n try:\n # Gets around \"Resource busy\" errors\n device.detach_kernel_driver(0)\n except Exception:\n pass\n device.set_configuration()", "def set_state(self, value):\n self.state = value", "def reset(self):\n self.index = self.start_index\n self.state = self.initial_state\n self.tape = Tape(empty_value=self.empty_value)", "def set_load_initial_state(self, initial_state_path: str) -> None:\n self.initial_state = GameState.load(initial_state_path)", "def set_default(self, state: _base.State):\n\n if self.default is not None:\n fmt = \"Overwriting current default state '%s' with '%s'\"\n _logger.warning(fmt % (self.default, state))\n self.default = state", "def initialise(self):\n self.set_up()", "def set_state(self,s):\n self.state = s", "def state(self, state):\n self._state = state", "def initial(self):\n self.update_panel_displays()\n yield 0\n #\n if self.options.initial_state:\n self.started = True\n self.nextState(getattr(self, self.options.initial_state)())\n else:\n self.nextState(self.start_screen())", "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "def initial(self, e):\r\n self._add_message(\"top-INIT;\")\r\n self.foo_ = 0\r\n self.INIT(HsmTst.d2)", "def init(self):\n self.is_init = True\n raise NotImplementedError", "def __setstate__(self, _state : dict):\n self.__init__(**_state)", "def initial_state(self):\n return 0", "def _reset_for_new_walk(self):\n # Starting State\n self.state = State('start', 0, 1, 0, 0, self.state_space_parameters.input_size, 0, 0, False)\n\n # Architecture String\n self.state_list = [self.state.copy()]", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def initial_state(self):\n # Network details elided.\n initial_state = None\n\n return initial_state", "def initial_state(self):\n return None", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, 
state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def change_state(self):\n new_state = 0 if self.state.state == 1 else 1\n answer = UsbHost.send_query(self.state.ser, \"SetState\", str(self.state.device_id), new_state)\n if answer in wrong_answers:\n error_message(\"Не удалось сменить состояние\")\n self.statusbar.showMessage(answer_translate[answer])\n else:\n self.statusbar.clearMessage()\n self.state.state = new_state\n if new_state == 1:\n self.set_auto_active()\n if new_state == 0:\n self.set_hand_active()", "def on_start(self):\n self.state = STARTED", "def set_init_state(\n self,\n id: int,\n cpu_cores_capacity: int,\n memory_capacity: int,\n pm_type: int,\n region_id: int,\n zone_id: int,\n data_center_id: int,\n cluster_id: int,\n rack_id: int,\n oversubscribable: PmState = 0,\n idle_energy_consumption: float = 0,\n ):\n self._id = id\n self._init_cpu_cores_capacity = cpu_cores_capacity\n self._init_memory_capacity = memory_capacity\n self._init_pm_type = pm_type\n self._init_pm_state = oversubscribable\n\n self._region_id = region_id\n self._zone_id = zone_id\n self._data_center_id = data_center_id\n self._cluster_id = cluster_id\n self._rack_id = rack_id\n\n self._idle_energy_consumption = idle_energy_consumption\n\n self.reset()", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def initial_state(self):\n h_0 = tf.zeros([1, self._num_units], self._dtype)\n context_0 = self._compute_context(h_0)\n h_0 = context_0 * 0\n\n if self._dec_init_states is None:\n batch_size = tf.shape(self._memory)[0]\n cell_states = self._cell.zero_state(batch_size, self._dtype)\n else:\n cell_states = self._dec_init_states\n\n attn_state_0 = AttnState(cell_states, h_0, context_0)\n\n return attn_state_0", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def _initialize(self):\n self.send_init_command()", "def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! 
State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")", "def Initialise(self):\n self.__m_Platform.Initialise()\n self.__m_Pump.Initialise( False )", "def __setstate__(self, state):\n return None", "def __init__(self, device):\n self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None", "def set_state(self, state):\n self._env.set_state(state)", "def __init__(\n self,\n initial_flag: bool\n ):\n\n super().__init__()\n\n if initial_flag:\n self.set()\n else:\n self.clear()\n\n self.abort = False", "async def init(self):\n logger.info(\"Init device: %s\", self._serial)\n self._callback(STATUS_INIT)\n\n self._init_binaries()\n self._init_apks()\n await self._init_forwards()\n\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --stop\")\n await adb.shell(self._serial, \"/data/local/tmp/atx-agent server --nouia -d\")", "def start(self):\n self.reset()\n self.on_start()", "def on(self):\n self._set_state(on=True)", "def _reset(self):\n self._click()\n if self._touch is None:\n self._state = STATE_INACTIVE", "def init_game_setting(self):\n self.state.state_counter_while_testing += 1", "def init_state(self):\n self._state = {\"logger\": None}", "def start(self):\r\n self.setDriver('ST', 1)", "def initialize_state(self):\n raise NotImplementedError()", "def reset(self):\n # Sample random state from initial state distribution\n self._cur_state = self._sample_state(self._mdp.I)\n self._prev_state = self._cur_state", "def __init__(self):\n self.update_state()", "def __init__(self, init_state):\n self._curr_state = init_state", "def set_to_start(self) -> None:\n start_config = self._path_points[0]\n self._mobile.set_2d_pose(start_config[:3])\n self._path_done = False", "def send_init_event(self):\n self.status['type'] = '__init__'\n self._send()", "def reset(self):\n self.state = \"YYYYRRRRGGGGOOOOBBBBWWWW\"", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def __setstate__(self, state):\n\n self.set(DER = state)", "def __init__(self, cb, model_unique_id, initial_data=None):\n super(USBDevice, self).__init__(cb, model_unique_id, initial_data)\n if model_unique_id is not None and initial_data is None:\n self._refresh()\n self._full_init = True", "def set_state(self, state: int):\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)", "def set_state(self, state):\n self.env.sim.set_state(state)\n mjlib.mj_fwdPosition(self.env.sim.model, self.env.sim.data)\n mjlib.mj_fwdVelocity(self.env.sim.model, self.env.sim.data)\n 
mjlib.mj_fwdAcceleration(self.env.sim.model, self.env.sim.data)\n mjlib.mj_fwdActuation(self.env.sim.model, self.env.sim.data)\n mjlib.mj_fwdConstraint(self.env.sim.model, self.env.sim.data)\n self._sync_dynmodel()" ]
[ "0.7504116", "0.690324", "0.6809412", "0.67910904", "0.67115295", "0.6696538", "0.6672575", "0.6632381", "0.6580465", "0.6563067", "0.6559918", "0.64950496", "0.64455366", "0.642056", "0.64086676", "0.6338755", "0.6338366", "0.6336666", "0.63347834", "0.6327715", "0.629293", "0.6281045", "0.627568", "0.6262225", "0.62346506", "0.62324435", "0.62241334", "0.62174326", "0.621685", "0.62142116", "0.62049615", "0.61645544", "0.61645544", "0.61645544", "0.61421615", "0.6128799", "0.6105846", "0.609789", "0.6044728", "0.6033319", "0.6032202", "0.60087186", "0.60030574", "0.5999335", "0.5990645", "0.59884375", "0.59860414", "0.59845394", "0.59597355", "0.5954927", "0.5954003", "0.5949279", "0.59483415", "0.5946219", "0.59130317", "0.59052783", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.5902018", "0.59007156", "0.5898137", "0.5887524", "0.5879095", "0.58783126", "0.5872992", "0.5850521", "0.584923", "0.58492213", "0.58488953", "0.58367205", "0.5823673", "0.5819965", "0.5806157", "0.5800905", "0.57923913", "0.57922465", "0.57906324", "0.5781926", "0.5778223", "0.5770045", "0.57673246", "0.5764703", "0.57620025", "0.57473445", "0.5747038", "0.5736259", "0.5735782", "0.57328653", "0.57248217", "0.572427", "0.5719254" ]
0.0
-1
Reset the device, reinitialising the data.
def reset(self):
        self.reset_count += 1
        self._init_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_reset(self):\n\t\tlogger.info('Device Reset')\n\t\tself.spi.writebytes([0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff])\n\t\tprint(DELIMITER*'-')", "def resetDevice(self):\n reset_pkt = [START_BYTE_1, START_BYTE_2, RESET_MTYPE, 0x00, HEADER_SIZE + RESET_DATA_SIZE]\n reset_pkt.extend(RESET_KEY_LE)\n\n crc = crc8(reset_pkt)\n reset_pkt.append(crc)\n\n self.write(bytearray(reset_pkt))", "def reset_data(self):\n self.data = None", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def reset(self):\n self.temp_data.clear()", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self._data = []", "def reset(self):\n self._buffer.fill(0)", "def reset_data(self):\n self.data = []", "def resetDeviceStates(self):", "def doShutdownDevice(self):\n if self.device is not None:\n self.device.reset()\n self.device = None", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def reset(self):\n self.work_state = work_state[\"Measuring\"]\n self.report_mode = report_mode[\"Initiative\"]\n self.duty_cycle = 0\n self.logger.info(\"{}: sensor resetted.\".format(self.sensor_name))", "def reset(self):\n self.write_to_serial('*RST')", "def system_reset(self):\n data = chr(self.SYSTEM_RESET)\n self.pymata.transport.write(data)\n\n # response table re-initialization\n # for each pin set the mode to input and the last read data value to zero\n with self.pymata.data_lock:\n # remove all old entries from existing tables\n for _ in range(len(self.digital_response_table)):\n self.digital_response_table.pop()\n\n for _ in range(len(self.analog_response_table)):\n self.analog_response_table.pop()\n\n # reinitialize tables\n for pin in range(0, self.total_pins_discovered):\n response_entry = [self.pymata.INPUT, 0]\n self.digital_response_table.append(response_entry)\n\n for pin in range(0, self.number_of_analog_pins_discovered):\n response_entry = [self.pymata.INPUT, 0]\n self.analog_response_table.append(response_entry)", "def clear(self):\n self.initialize()\n self.device_disconnect()", "def reset(self):\n self._buffer.clear()", "def reset(self) -> None:\n # See section 7.2.2 of the datasheet for reset description.\n self._reset.value = True\n time.sleep(0.0001) # 100 us\n self._reset.value = False\n time.sleep(0.005) # 5 ms", "def reset(self) -> None:\n\n self.host.reset()", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None\n self.prev_attn = None", "def reset(self):\n self._write(0x16, 1, 3, 0x08)", "def reset(self):\n self.ser.close()\n self.ser.open()", "def reset(self):\n # Attempt to reset data loader\n self.data_loader_iter = iter(self.data_loader)\n self.num_batches = 0\n\n # Make sure calibrator will check the cache again when reset.\n self.cache_contents = None", "def Reset(self, ResetD = False):\n if ResetD: self.D = self.DInit\n self.NAcc = 0.\n self.NAtt = 0.", "def reset(self):\n \n pass", "def reset(self):\n self.data = {}\n 
self.is_bound = False\n self._errors = None", "def reset(self):\n\n self.memory = self.program.copy()\n self.output = 0\n self.stop_code = 0\n self.code_position = 0\n self.input_position = 0\n self.input_parameters = []", "def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def reset(self):\n self._set_init()", "def reset(self) -> None:\n self.memory = self.intcode.copy()\n self.ip = 0\n self.stdout.clear()", "def reset(self):\n self.ram = Memory(256 * 10)\n self.stdout = ''\n self.input_ptr = 0\n self.pc = 0\n self.stack = []\n logging.debug('Reset all')", "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n\t\tself.buf = []", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset_data_recorder(self):\n\n self.t_values = []\n self.x_values = []\n self.tau_values = []", "def reset(self):\n\n # the 'cached' data to be displayed by the hex view\n self.data = None\n self.mask = None\n self.data_size = 0\n self.delta = None\n\n self.address = 0\n self.fade_address = 0\n\n # pinned memory / breakpoint selections\n self._pinned_selections = []", "def reset(self):\n self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_RESET, (), '', '')", "def reset(self):\n self.N = self.N[0:2]\n self.t = 2\n self.harvest_available = 0\n self.harvest_record = [0,0]", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self):\n self.memory.clear()\n self.relative_base = 0\n self.input_queue.clear()\n self.instr_idx = 0", "def reset(self) -> None:\n pass", "def reset(self) -> None:\n raise NotImplementedError", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self):\n self.count = 0\n self.soft = False\n self.can_double = True\n self.can_split = False\n self.first_card = 0", "def _reset(self):\n pass", "def _doResetMemory(self):\n self._cmdClearMemory()\n time.sleep(1)\n self._cmdResetParameters()\n time.sleep(1)", "def reset(self):\n\t\tpass", "def reset():\r\n pass", "def reset(self):\n # type: () -> None\n self.digest.clear()\n self.offset.clear()\n self.buffer.clear()\n self.position = 0\n self.counter = 0\n self.finished = False", "def reset(self, reset):\n\n self._reset = reset", "def reset_data(self):\n 
try:\n self._data = self._schema({})\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't reset %s: %s\",\n self._file, humanize_error(self._data, ex))", "def reset(self):\r\n pass", "def reset(self):\r\n pass", "def reset(self):\n self.u0.fill(0.)\n self.u1.fill(0.)\n self.u2.fill(0.)\n self.time = 0.", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()", "def reset(self):\n self.enc_len = None\n self.precomputed_enc_h = None\n self.mask = None", "def reset():", "def reset():", "def reset():", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def Reset(self):\n pass", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def _reset(self) -> None:" ]
[ "0.758682", "0.75735646", "0.7518953", "0.7389302", "0.73468316", "0.71213037", "0.70714784", "0.69749707", "0.6920218", "0.6890552", "0.68882835", "0.683326", "0.6819545", "0.68126434", "0.6779973", "0.67533934", "0.6750631", "0.67285883", "0.6715532", "0.67061126", "0.67061126", "0.67061126", "0.67061126", "0.6678892", "0.6676467", "0.66700745", "0.6667878", "0.66676563", "0.6664625", "0.6648453", "0.6645851", "0.66309303", "0.66288036", "0.66258144", "0.6618612", "0.6617027", "0.66166896", "0.6606088", "0.66047865", "0.66047865", "0.66047865", "0.66047865", "0.66047865", "0.65923077", "0.6592274", "0.65911406", "0.6578877", "0.65746135", "0.65746135", "0.65746135", "0.65746135", "0.65746135", "0.65746135", "0.65746135", "0.65746135", "0.6568016", "0.6557879", "0.6555305", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.65356684", "0.6528318", "0.6528318", "0.6528318", "0.6521879", "0.6517719", "0.65173393", "0.65116274", "0.65096045", "0.6507159", "0.6506906", "0.6500963", "0.64999723", "0.64999723", "0.6497854", "0.64909065", "0.64909065", "0.64909065", "0.6476428", "0.64761263", "0.64761263", "0.64761263", "0.6475102", "0.6473717", "0.6465832", "0.6465609" ]
0.75327283
2
Add all run parsers.
def add_subparser(
    subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
    run_parser = subparsers.add_parser(
        "run",
        parents=parents,
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        help="Starts a Rasa server with your trained model.",
    )
    run_parser.set_defaults(func=run)

    run_subparsers = run_parser.add_subparsers()
    sdk_subparser = run_subparsers.add_parser(
        "actions",
        parents=parents,
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        help="Runs the action server.",
    )
    sdk_subparser.set_defaults(func=run_actions)

    arguments.set_run_arguments(run_parser)
    arguments.set_run_action_arguments(sdk_subparser)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_command_parsers(self) -> None:\n for command in self.commands:\n self.get_command_parser(command)", "def run_parse(self):\n # Data set already has source file names from load_inputs\n parsedset = {}\n parsedset['data_set'] = []\n for log in self.input_files:\n parsemodule = self.parse_modules[self.args.parser]\n try:\n if self.args.tzone:\n parsemodule.tzone = self.args.tzone\n except NameError: pass\n parsedset['data_set'].append(parsemodule.parse_file(log))\n self.data_set = parsedset\n del(parsedset)", "def RegisterParsers(cls, parser_classes):\n for parser_class in parser_classes:\n cls.RegisterParser(parser_class)", "def list_parsers(self, *args):\n print('==== Available parsing modules: ====\\n')\n for parser in sorted(self.parse_modules):\n print(self.parse_modules[parser].name.ljust(16) + \\\n ': ' + self.parse_modules[parser].desc)\n sys.exit(0)", "def ImportParsers(cls, import_dir):\n sys.path.append(import_dir)\n cls.elf_parser = importlib.import_module(\n \"vts.utils.python.library.elf_parser\")\n cls.vtable_parser = importlib.import_module(\n \"vts.utils.python.library.vtable_parser\")", "def fill_subparsers(subparsers):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tcls.add_subparser(subparsers)", "def add_runs(self,runs):\n for r in runs:\n self.add(r)", "def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]", "def add_argparse_subparser(subparsers):\n\n new_sub_parser = subparsers.add_parser(\n PluginManager.argparse_subparser_name(), help=\"plugin commands\"\n )\n PluginManager.__argparse_subparser = new_sub_parser\n plugin_subparsers = new_sub_parser.add_subparsers(\n dest=PluginManager.__root_subparser_name\n )\n\n sub_sub_parser = plugin_subparsers.add_parser(\n \"list\", help=\"list the available plugins\"\n )\n sub_sub_parser.add_argument(\n \"--all\",\n dest=\"show_all\",\n action=\"store_true\",\n default=False,\n help=\"show all loaded plugins (default is False)\",\n )\n sub_sub_parser.add_argument(\n dest=\"list_filter\",\n default=None,\n help=\"filter\",\n nargs=\"?\",\n type=PluginManager.__list_filter_type,\n )\n sub_sub_parser = plugin_subparsers.add_parser(\n \"info\", help=\"information on a specific plugin\"\n )\n sub_sub_parser.add_argument(\n dest=\"info_filter\",\n default=None,\n type=PluginManager.__info_filter_type,\n help=\"an id\",\n )", "def prepare_multiple_out_parsers(run_dict):\n output_parser_dict = {}\n for run_label, run_name in run_dict.items():\n output_parser_dict[run_label] = OutputParser(run_name, use_most_recent=False)\n return output_parser_dict", "def parse(self):\n for section in self.sections:\n section.parse()", "def build_parsers(self, parser: argparse.ArgumentParser):\n for opt in reversed(self._options):\n parser.add_argument(*opt.args, **opt.kwargs)\n parser.set_defaults(_cmd=self, _parser=parser)\n\n if self._children:\n subparsers = parser.add_subparsers(title=\"commands\")\n for child in self._children:\n if child._name is None:\n raise CLIError(\n f\"Children {child._func} should be wrapped with\"\n \" @command\")\n subparser = subparsers.add_parser(child._name, **child._kwargs)\n child.build_parsers(subparser)", "def whole(parsers):\n if len(parsers) == 0:\n return finished >> (lambda x: [])\n if len(parsers) == 1:\n return parsers[0] + finished >> (lambda x: x[:-1])\n return reduce(add, parsers) + skip(finished)", "def add_to_OptionParser(parser): \n\n parser.set_defaults(input_plugin=None,\n output_plugin=None)\n \n for plugin in 
registry:\n #print \"add to option parse\", plugin.id\n group = optparse.OptionGroup(parser,\n \"For %s plugin (modify settings BEFORE calling plugin)\" % (plugin.id))\n\n plugin.add_to_OptionParser(parser, group)\n\n for option in getattr(plugin, 'options', ()):\n option.add_to_OptionParser(plugin, parser, group)\n parser.add_option_group(group)", "def run(self, parsed):", "def d_parsers(self):\n\n return self._d_parsers", "def _parse_sons(self, d_parsers, verbose=False):\n\n treated = set(d_parsers.keys())\n imports = set(self.imports.keys())\n imports = imports.difference(treated)\n if not imports:\n return d_parsers\n\n for source in imports:\n if verbose:\n print ('>>> treating :: {}'.format(source))\n\n # get the absolute path corresponding to source\n\n filename = get_filename_from_import(source,self._output_folder)\n\n q = Parser(filename)\n q.parse(d_parsers=d_parsers)\n d_parsers[source] = q\n\n # link self to its sons\n\n imports = list(self.imports.keys())\n for source in imports:\n d_parsers[source].append_parent(self)\n self.append_son(d_parsers[source])\n\n return d_parsers", "def parse_file(self, parse_all=False, file=None):\n if not parse_all:\n input()\n for line in sys.stdin:\n self.parse_and_add(line)", "def run(self):\n for tool in self.tools:\n tool.run()\n return", "def load_parsers_from_plugins(subparser, plugins):\n for plugin_name, plugin_class in plugins.items():\n # create a parser object for the plugin.\n plugin_parser = subparser.add_parser(\n plugin_name,\n description = plugin_class.__doc__,\n )\n\n plugin_parser.add_argument('vpc_name', help='The VPC\\'s Name tag.')\n\n try:\n # Assume class plugin with 'setup_parser' and 'main' staticmethods.\n plugin_class.setup_parser(plugin_parser)\n plugin_parser.set_defaults(func = plugin_class.main)\n except AttributeError:\n # Assume function plugin w/o 'setup_parser' or 'main' staticmethods.\n plugin_parser.set_defaults(func = plugin_class)", "def add_parser_options(cls, parser):\n for arg in cls.configurables():\n getattr(cls, arg).add_argument(parser)", "def add_subparsers(cls, parser, name=\"\", prefixes=[], delim=\"_\", title=\"commands\", description=\"available commands\", required=True):\n\t\tcommand = f\"command_{name}\"\n\t\tif command in cls.COMMANDS:\n\t\t\traise CommandParserNameDuplicated(f\"Command parser with name {name} already registered.\")\n\t\t\n\t\tcls.COMMANDS[command] = {}\n\t\t\n\t\tsub = parser.add_subparsers(title=title, dest=command, description=description)\n\t\tsub.required = True\n\t\tfor pf in prefixes:\n\t\t\tfor c, method in cls.get_commands(prefix=pf, delim=delim):\n\t\t\t\tcls.set_subparser_for(c, method, sub)\n\t\t\t\tcls.COMMANDS[command][c] = method", "def addAllFactories(self) -> None:\n ...", "def run_parser(self, path, app=None, renderer=None, failed=False):\n self._stats = RenderStats()\n\n filters = {'app': app,\n 'renderer': renderer,\n 'failed': failed}\n\n # Read all files that are properly named\n filenames = [os.path.join(path, rf) for rf in os.listdir(path) if re.match(r'renders_\\d{4}-\\d{2}-\\d{2}.csv', rf)]\n\n for fn in filenames:\n for render in filter_rows(fn, filters):\n # Update stats based on this render\n self._stats.update(render)", "def add(cls, subparsers):\n subparser = subparsers.add_parser(\n name=cls.__tool_name__(),\n description=cls.__get_description__())\n\n cls.__add_arguments__(subparser)\n subparser.set_defaults(func=cls.from_args)\n return subparser", "def get_parsers(self):\n return tuple([getattr(self, '_{}'.format(i)) for i in 
self.parsers_available])", "def parse(self, commands):\n raise NotImplementedError()", "def add_parse_action(self, *fns, callDuringTry=False):\n output = self.copy()\n output.parse_action += list(map(wrap_parse_action, fns))\n output.set_config(\n callDuringTry=self.parser_config.callDuringTry or callDuringTry\n )\n return output", "def add_loaders(self, loaders):\n # type: (List[AbstractTemplateLoader]) -> None\n self.runtime_configuration_builder.add_loaders(loaders)", "def arg_parser(self):\n print\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n status = subparsers.add_parser('status', help=self.viewer.status.__doc__)\n status.set_defaults(func=self.viewer.status)\n\n view_catalog = subparsers.add_parser('catalog', help=self.viewer.view_full_catalog.__doc__)\n view_catalog.set_defaults(func=self.viewer.view_full_catalog)\n\n view_tags = subparsers.add_parser('tags', help=self.viewer.view_all_tags.__doc__)\n view_tags.add_argument('-a', '--alpha', action='store_true', help=\"List tags alphabetically\")\n view_tags.set_defaults(func=self.viewer.view_all_tags)\n\n search = subparsers.add_parser('search', help=self.viewer.search_tags.__doc__)\n search.add_argument('-t', '--tags', nargs='+', required=True, help=\"Tags to search for\")\n search.add_argument('-o', action='store_true', help=\"Search using 'or' logic ('and' logic by default)\")\n search.set_defaults(func=self.viewer.search_tags) \n\n add_file = subparsers.add_parser('addfile', help=self.editor.add_files.__doc__)\n add_file.add_argument('filename', help=\"Name of file to add catalog\")\n add_file.set_defaults(func=self.editor.add_files)\n\n add_tags = subparsers.add_parser('addtags', help=self.editor.add_tag.__doc__)\n add_tags.add_argument('filename', help=\"Name of file to add catalog\")\n add_tags.add_argument('-t', '--tags', nargs='+', help=\"Tags to add to catalog\")\n add_tags.set_defaults(func=self.editor.add_tag)\n\n edit_entry = subparsers.add_parser('edit', help=self.editor.edit_entry.__doc__)\n edit_entry.add_argument('filename', help=\"Name of file to add catalog\")\n edit_entry.set_defaults(func=self.editor.edit_entry)\n\n clean_catalog = subparsers.add_parser('clean', help=self.editor.clean_catalog.__doc__)\n clean_catalog.add_argument('-t', '--tags', nargs='+', help=\"Tags to be deleted from catalog entirely\")\n clean_catalog.set_defaults(func=self.editor.clean_catalog)\n\n delete_entry = subparsers.add_parser('delete', help=self.editor.delete_entry.__doc__)\n delete_entry.add_argument('filename', help=\"File from which to delete specified tags\")\n delete_entry.add_argument('-t', '--tags', nargs='+', help=\"Tags to be deleted\")\n delete_entry.set_defaults(func=self.editor.delete_entry)\n\n merge_tags = subparsers.add_parser('merge', help=self.editor.merge_tags.__doc__)\n merge_tags.add_argument('--source', required=True, help=\"File from which tags are being taken\")\n merge_tags.add_argument('--dest', required=True, help=\"Destination file to which tags from --source are added\")\n merge_tags.set_defaults(func=self.editor.merge_tags)\n\n \n args = parser.parse_args()\n\n args.func(args)", "def add_parser(self, name, func):\n self.__parser_map__[name] = _func2method(func, method_name=name)\n return None", "def get_parser(stages):\n\n # martian\n parser = argparse.ArgumentParser(prog=\"martian\")\n subparsers = parser.add_subparsers()\n\n # martian stage\n stage_parser = subparsers.add_parser(\n \"stage\", help=\"Work with Martian stages.\")\n\n stage_subparsers = 
stage_parser.add_subparsers(\n title=\"Stage subcommands\",\n help=\"Actions than can be performed on Martian stages.\")\n\n # martian stage list\n stage_list_parser = stage_subparsers.add_parser(\n \"list\",\n help=\"List all available stages.\")\n stage_list_parser.set_defaults(func=stage_list)\n stage_list_parser.set_defaults(stages=stages)\n\n # martian stage describe <stage_name>\n stage_describe_parser = stage_subparsers.add_parser(\n \"describe\",\n help=\"Describe the inputs, outputs, and source location of a stage\")\n stage_describe_parser.add_argument(\n \"stage_name\",\n help=\"Name of the stage to describe\")\n stage_describe_parser.set_defaults(func=stage_describe)\n stage_describe_parser.set_defaults(stages=stages)\n\n # martian stage run <stage_name> <stage_phase> <stage_args...>\n stage_run_parser = stage_subparsers.add_parser(\n \"run\",\n help=\"Run a stage\")\n\n stage_run_subparsers = stage_run_parser.add_subparsers(\n title=\"Stages available to be run\",\n help=\"Names of available stages.\")\n\n for stage in stages.values():\n\n individual_stage_parser = stage_run_subparsers.add_parser(\n stage.name,\n help=\"Execute stage \" + stage.name)\n individual_stage_subparsers = individual_stage_parser.add_subparsers()\n\n # Some stages don't have a split or join\n available_stage_phases = ['split', 'join', 'main'] if (stage.splits or stage.force_split) else ['main']\n\n for phase in available_stage_phases:\n phase_parser = individual_stage_subparsers.add_parser(\n phase,\n help='Run the ' + phase + ' of ' + stage.name)\n\n phase_parser.set_defaults(func=stage_run)\n phase_parser.set_defaults(phase=phase)\n phase_parser.set_defaults(stage=stage)\n\n for input_ in _stage_inputs(stage, phase):\n help_message = \"Type: \" + input_.type\n if input_.help:\n help_message += \" Help: \" + input_.help\n phase_parser.add_argument(\n \"--\" + input_.name,\n type=martian_type_to_python_type(input_.type),\n nargs=martian_type_to_nargs(input_.type),\n default=None,\n help=help_message)\n\n # Handle the \"split_file\" for mains that come after a split\n if phase == 'main' and 'split' in available_stage_phases:\n phase_parser.add_argument(\n \"--split_file\",\n type=martian_type_to_python_type(\"File\"),\n nargs=martian_type_to_nargs(\"File\"),\n default=None,\n help=\"File with split arguments.\")\n\n return parser", "def add_command_parsers(parser, logparser):\n subparsers = parser.add_subparsers(metavar='Command')\n help_text = 'ONE OF THE FOLLOWING:\\n'\n available_commands = find_commands(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'functions'))\n max_length = max([len(a) for a in available_commands])\n for command in available_commands:\n child_parser = subparsers.add_parser(command, parents=[logparser])\n call = importlib.import_module('functions.%s'% command)\n if hasattr(call, 'set_argparser'):\n call.set_argparser(child_parser)\n else:\n child_parser.description = 'Description is missing'\n help_text += command + \": \" + \" \"*(max_length-len(command)) + ('\\n'+' '*(max_length+2)\n ).join(textwrap.wrap(child_parser.description,60)) + '\\n'\n child_parser.set_defaults(func=call.main)\n subparsers.help = help_text + '\\nType \"Command --help\" for more information about given command'", "def register_entrypoints(self):\n for entrypoint in entrypoints.get_group_all(\"mlflow.run_context_provider\"):\n try:\n self.register(entrypoint.load())\n except (AttributeError, ImportError) as exc:\n warnings.warn(\n 'Failure attempting to register context 
provider \"{}\": {}'.format(\n entrypoint.name, str(exc)\n ),\n stacklevel=2\n )", "def parse(self) -> None:\n self._parse_zone_files()\n self._process_rules()\n self._process_zones()\n self._process_links()", "def set_parser(*, collected, parser=None):\n if parser is None:\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n commands = unique(collected)\n for name, details in commands.items():\n original = details.original\n args = details.extra\n a_subparser = subparsers.add_parser(name)\n a_subparser.set_defaults(\n __gather_name__=name,\n __gather_command__=original,\n )\n for arg_details in args:\n a_subparser.add_argument(*arg_details.args, **dict(arg_details.kwargs))\n return parser", "def register(\n self,\n root_parser: argparse.ArgumentParser,\n subparsers: argparse._SubParsersAction,\n ) -> None:\n self.root_parser = root_parser\n parser = subparsers.add_parser(\n self.name,\n aliases=self.aliases,\n help=self.help,\n description=self.help,\n add_help=self.add_help,\n )\n parser.set_defaults(command=self)\n self.configure(parser)", "def setRegexParsers(self, value):\n return self._set(regexParsers=value)", "def register_post_parser(self, fct, cfg, ctx):\n self.post_parsers.append((fct, cfg, ctx))", "def build_all(self):\n log.debug('Build all')\n print('Building...')\n\n log.debug('Detail')\n print('\\nAdding to details table for:')\n self.dict_parse('DetailParser', 'Detail')\n\n log.debug('Vendor')\n print('\\nAdding to vendors table for:')\n self.list_parse('VendorParser', 'Vendor')\n\n log.debug('Vendee')\n print('\\nAdding to vendees table for:')\n self.list_parse('VendeeParser', 'Vendee')\n\n log.debug('Location')\n print('\\nAdding to locations table for:')\n self.list_parse('LocationParser', 'Location')", "def add_parser(subparsers) -> None:\n contest_parser = subparsers.add_parser(\n 'contest', help='build contest files')\n mut_ex_group = contest_parser.add_mutually_exclusive_group()\n mut_ex_group.add_argument(\n '-p', '--pdf', action='store_true', default=False, help='generate contest PDFs')\n mut_ex_group.add_argument('-i', '--io', action='store_true',\n default=False, help='generate contest input/output files')\n contest_parser.add_argument(\n 'problem_dir', help='path to problem(s)', nargs='+')\n contest_parser.add_argument(\n 'contest_dir', help='directory which the contest will be saved')\n contest_parser.set_defaults(function=lambda options: process_contest(\n options.problem_dir, options.contest_dir, options.pdf, options.io))", "def _init_add(self):\n def add(core, args):\n return core.add(args.start, args.stop, args.task)\n\n usage = 'stl add start stop [task]'\n desc = (\n 'directly add a log entry; '\n 'you can also do this from python, take a look at '\n 'stl.core.Core.add()'\n )\n\n subp = self.subparsers.add_parser(\n 'add', usage=usage, description=desc,\n help=desc[:desc.find(';')])\n\n subp.add_argument(\n 'start',\n help='when work on the task started; use %%Y-%%m-%%dT%%H:%%M')\n subp.add_argument(\n 'stop',\n help='when work on the task stopped; use %%Y-%%m-%%dT%%H:%%M')\n subp.add_argument(\n 'task', nargs='?', default='',\n help='the task being worked on; optional')\n\n subp.set_defaults(func=add)", "def add_subparser(subparsers):\n parser = subparsers.add_parser('run', help=\"run artifact\")\n parser.add_argument('run_config', default='cps.ini',\n help=\"run configuration file\")\n parser.add_argument('-p', '--persist', action=\"store_true\", default=False,\n help=\"to persist data, dont delete sandbox after use\")\n\n 
parser.set_defaults(func=main)", "def addAll(self, *args):\n pass", "def addAll(self,*args, **kwargs):\n pass", "def parse_files():\n pfuncs = [ # parse py files : add #\n parse_test_files,\n parse_model_files,\n parse_url_files,\n parse_route_files,\n parse_settings_files,\n parse_setup_files,\n ]\n\n while PY_FILES:\n for _ in range(len(pfuncs)):\n a_func = pfuncs.pop()\n a_func()\n break", "def __init__(self):\r\n super(TestParser, self).__init__([self.TestHandler()])", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "def _initialize_all():\n registry.clear_checkers()\n registry.clear_contexts()\n cli._register_internal_plugins.has_run = ( # pylint: disable=protected-access\n False\n )\n cli._register_internal_plugins() # pylint: disable=protected-access", "def executeAll(lines):", "def parse(self):\n\t\tself.maincfg_values = self._load_static_file(self.cfg_file)\n\t\t\n\t\tself.cfg_files = self.get_cfg_files()\n\t\t\n\t\tself.resource_values = self.get_resources()\n\t\t\n\t\tself.timestamps = self.get_timestamps()\n\t\t\n\t\t## This loads everything into\n\t\tfor cfg_file in self.cfg_files:\n\t\t\tself._load_file(cfg_file)\n\n\t\tself._post_parse()", "def run_all_plugins(self):\n logger.info(\"Starting all plugins\")\n for name in self.name_to_plugin_class:\n if self.name_to_enabled[name]:\n self.run_plugin(name)", "def parse_all(self):\n\n # Generates a list of apartment urls\n self.parse_apartment_urls()\n\n # Parses each apartment url and stores it in apartment_data\n for apartment_url in self.apartment_urls:\n self.parse_single_page(apartment_url)", "def _parse_extensions(self):\n for root in self.roots:\n for extensions in root.iter('extensions'):\n for extension in extensions.iter('extension'):\n extension_name = extension.attrib.get('name', '')\n #print(f'Extension: {extension_name}')\n self.extensions.append(extension_name)\n\n extension_apis = extension.attrib.get('supported', '')\n extension_api_list = set(extension_apis.split('|'))\n\n # filter by api\n if 'gl' not in extension_apis:\n continue\n\n for require in extension.iter('require'):\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_extension[enum_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_extension[command_name].append({\n \"name\": extension_name,\n \"api_list\": extension_api_list})", "def start_all(self):\n for proc in self.get_all():\n proc.start()", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. 
Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. 
If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def subparser( parser, subparsers ):", "def _add_parsed_files(self, files):\n path = os.path.join(self.path, 'roas_parsed.csv')\n with open(path, 'w+') as f:\n for line in files:\n f.write(line + '\\n')\n utils.csv_to_db(Historical_ROAs_Parsed_Table, path)", "def run_all(self):\n runs = []\n for run in self.benchmarks:\n run.start()\n run.wait()\n runs.append(run.metadata)\n return runs", "def main():\n stats.set_time_start()\n\n if config.options.show_progress:\n stats.start_monitor()\n\n recorders = Recorder.launch(config.options.recorders)\n\n try:\n for filename in config.filenames:\n parser.parse(filename)\n\n Recorder.wait_empty()\n except KeyboardInterrupt:\n pass\n\n stats.set_time_stop()\n\n if config.options.show_progress:\n stats.stop_monitor()\n\n stats.print_summary()", "def add_arguments(self, parser):\n\n cmd = self # make sure we can use sub parser in django. 
via stack_overflow\n\n class SubParser(CommandParser):\n \"\"\"Use to avoid the error when using sub parser in django's add_arguments method.\"\"\"\n def __init__(self, **kwargs):\n super(SubParser, self).__init__(cmd, **kwargs)\n\n # add custom sub commands.\n\n subparsers = parser.add_subparsers(\n title=\"sub commands\",\n parser_class=SubParser,\n dest='sub_command',\n help='Sub commands you can use.'\n )\n\n # actions to start or stop socket server.\n\n server = subparsers.add_parser('server', help=\"Server Commands\")\n server.add_argument(\n 'action',\n metavar='ACTION',\n choices=self.socket_server_actions,\n help='Actions is: <%s>' % '|'.join(self.socket_server_actions),\n )\n\n # actions of targets when calling server is running.\n\n proxy = subparsers.add_parser('proxy', help=\"Proxy Commands\")\n proxy.add_argument(\n '-a', '--action',\n metavar='ACTION',\n required=True,\n choices=self.proxy_job_actions,\n help='Actions is: <%s>' % '|'.join(self.proxy_job_actions)\n )\n proxy.add_argument(\n '-t', '--targets',\n metavar='TARGET',\n nargs='*',\n help='Targets can be empty which means ALL, you can list targets by <./manage.py mirrordata proxy -a ping>.'\n )", "def add_parse_arguments(self, parser):\n parser.add_argument('command', help='The daemon command: start|status|stop|restart')\n parser.add_argument('--pid_file', help='The pid_file of the daemon')", "def add_subcommands(cls, parser: argparse.ArgumentParser) -> None:\n if cls.SUBCOMMANDS:\n subparsers = parser.add_subparsers(title=\"subcommands\", metavar=\"\", dest='cmd')\n for subcmd_class in cls.SUBCOMMANDS:\n parsers = subcmd_class.get_args()\n subcmd_class.parser = parsers[-1]\n\n subparser = subparsers.add_parser(\n subcmd_class.NAMES[0],\n aliases=subcmd_class.NAMES[1:],\n parents=parsers,\n help=subcmd_class.HELP,\n epilog=subcmd_class.EPILOG)\n subparser.set_defaults(command_class=subcmd_class)\n subcmd_class.customize_subparser(subparser)", "def import_additional_parser():\n # Parse arguments\n try:\n global add_parser\n import add_parser\n except ImportError as e:\n print('No additional parser found.')\n pass", "def parse_lines(self, lines):\n assert isinstance(lines, Iterable)\n\n for line in lines:\n name, values = self.parse_line(line)\n self.add(name, values)", "def test_multiple_parsers():\n rules = []\n first_parser = BlockParser(rules)\n assert len(first_parser.rules) == 0\n\n rules.append((lambda x: True, 1.0))\n second_parser = BlockParser(rules)\n assert len(second_parser.rules) == 1\n\n assert len(first_parser.rules) == 0, \"Non-local mutation of a parser's rules\"", "def setup_parser(self, parser):", "def add_arg_parser(subparsers):\n # add\n add_p = subparsers.add_parser('add', description='Create a bundle from a .csv, .tsv, or a directory of files.')\n add_p.add_argument('-t', '--tag', nargs=1, type=str, action='append',\n help=\"Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'\")\n add_p.add_argument('bundle', type=str, help='The destination bundle in the current context')\n add_p.add_argument('path_name', type=str, help='File or directory of files to add to the bundle', action='store')\n add_p.set_defaults(func=lambda args: _add(args))", "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def add_commands(subparsers, path):\n log.debug('importing %s', path)\n try:\n del sys.modules[path]\n except KeyError:\n pass\n try:\n package = importlib.import_module(path)\n 
except Exception as e:\n log.warning('failed to import commands package %s',\n path, exc_info=True)\n return\n log.debug('commands package: %s', path)\n for (finder, name, ispkg) in pkgutil.iter_modules(package.__path__):\n if ispkg:\n continue\n try:\n command = importlib.import_module('.' + name, path)\n except Exception as e:\n log.warning('failed to import %s command: %s',\n path, name, exc_info=True)\n continue\n if not getattr(command, 'run', None):\n log.warning('skipping command module without run function: %s',\n name)\n continue\n log.debug('command: %s'%(name))\n name = command.__name__.split('.')[-1]\n parser_help = getattr(command, 'parser_help', None)\n if parser_help is None:\n log.warning('command %s missing help text'%(command.__name__))\n parser = subparsers.add_parser(name, help=parser_help)\n command.add_arguments(parser)\n parser.set_defaults(run=command.run)", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def parse(self):\n return []", "def eval(self) -> None:\n for module in self.modules.values():\n module.eval()\n return", "def add_parser(cls, base_parser, functions_list):\n super(BaseAG, cls).add_parser(base_parser, functions_list)\n crossovers = cros_factory.crossover_factory()\n mutations = mut_factory.mutation_factory()\n representations = repr_factory.representations_factory()\n selections = selection_factory.selection_factory()\n\n # add crossover\n cls._parser.add_argument(\"--crossover\", required=True,\n choices=utils.get_names(crossovers),\n help=\"What crossover to use.\")\n\n cls._parser.add_argument(\"--mutation\", required=True,\n choices=utils.get_names(mutations),\n help=\"What mutation to use.\")\n\n cls._parser.add_argument(\"--representation\", required=True,\n choices=utils.get_names(representations),\n help=\"What representation to use.\")\n\n cls._parser.add_argument(\"--selection\", required=True,\n choices=utils.get_names(selections),\n help=\"What selection to use.\")\n\n cls._parser.add_argument(\"--population\", type=int,\n help=\"The population size.\")\n cls._parser.add_argument(\"--selection_crossover\", type=float,\n help=\"What procentage of the populations \"\n \"should be selected for crossover\")\n cls._parser.add_argument(\"--selection_mutation\", type=float,\n help=\"What procentage of the populations \"\n \"should be selected for mutation\")\n cls._parser.add_argument(\"--generations\", type=int,\n help=\"How many generations we should evolve\")\n\n cls._parser.set_defaults(population=100, selection_crossover=0.4,\n selection_mutation=0.4, generations=100)", "def collectPlugins(self):\n\t\tself.locatePlugins()\n\t\tself.loadPlugins()", "def runall():\n sclogic.runall()", "def run(self, registry):", "def addArguments(self, parser):\r\n self.argparseHelper.addArg(parser)", "def _parse_and_build_commands(self):\n for root in self.roots:\n for commands in root.iter('commands'):\n for command_element in commands.iter('command'):\n try:\n self._collect_command(command_element)\n\n except Exception as exception:\n command_name = GLGenerator.get_command_name(command_element)\n print('Error processing command {}: {}'.format(command_name, str(exception)))\n 
raise\n\n extension_name_max_len = 0\n for extension in self.extensions:\n extension_name_max_len = max(extension_name_max_len, len(extension))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n\n for extension in sorted(set(self.extensions)):\n quoted_extension = '\"' + extension + '\"'\n declaration = f' Extension_{extension:{extension_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_extension_map.insert(std::pair<std::string, Extension>({0:{1}}, Extension::Extension_{2:{3}}));'.format(\n quoted_extension, extension_name_max_len + 2, extension, extension_name_max_len\n )\n case_entry = ' case Extension::Extension_{0:{1}}: return \"{0}\";'.format(\n extension, extension_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(f' Extension_Count = {enum_value:>6}')\n self.extension_enum_declarations = ',\\n'.join(declarations)\n self.extension_map_entries = '\\n'.join(map_entries)\n self.extension_case_entries = '\\n'.join(case_entries)\n\n commands = set(self.command_list)\n\n commands = sorted(commands)\n\n command_name_max_len = 0\n for command in commands:\n command_name_max_len = max(command_name_max_len, len(command))\n\n enum_value = 1\n declarations = []\n map_entries = []\n case_entries = []\n for command in commands:\n declaration = f' Command_{command:{command_name_max_len}} = {enum_value:>6}'\n map_entry = ' g_command_map.insert(std::pair<std::string, Command>({0:{1}}, Command::Command_{2:{1}}));'.format(\n '\"' + command + '\"', command_name_max_len, command\n )\n case_entry = ' case Command::Command_{0:{1}}: return \"{0}\";'.format(\n command, command_name_max_len\n )\n declarations.append(declaration)\n map_entries.append (map_entry)\n case_entries.append(case_entry)\n enum_value += 1\n\n declarations.append(' Command_Count = {:>6}'.format(enum_value))\n self.command_enum_declarations = ',\\n'.join(declarations)\n self.command_map_entries = '\\n'.join(map_entries)\n self.command_case_entries = '\\n'.join(case_entries)", "def add_tokens(self, tokens):\n self.result.extend([d for d in tokens])", "def add_subparsers(dct, **kwargs):\n def _add_subparsers(parser):\n factory = parser.add_subparsers(**kwargs)\n # hack: bypass bug in python3 argparse\n # http://stackoverflow.com/questions/22990977/why-does-this-argparse-code-behave-differently-between-python-2-and-3\n factory.required = True\n for name in sorted(dct.keys()):\n funcs = dct[name]\n _subparser = factory.add_parser(name)\n build_arg_parser(funcs, _subparser)\n return _add_subparsers", "def run(self):\n for hook in self.hooks:\n logging.info(\"running hook: %s\" % hook)\n self.run_hook(hook)", "def add_complete_hic_subparser(subparsers):\n parser = subparsers.add_parser(\"hic-complete\", help=\"HiFive HiC Project Complete Analysis Function: Create all necessary files (Fend, Data, and Project) and learn correction parameters for a HiFive HiC project.\", epilog=\"For command line options of each normalization approach, type: %(prog)s <ALGORITHM> -h\")\n subparser2 = parser.add_subparsers(dest='algorithm')\n prob_parser = subparser2.add_parser(\"probability\", help=\"A probability model based approach for learning correction values associated with each fend. Learning is accomplished using gradient descent.\")\n exp_parser = subparser2.add_parser(\"express\", help=\"An appoximation based approach for learning correction values associated with each fend. 
Learning is accomplished using a variation of matrix balancing.\")\n bin_parser = subparser2.add_parser(\"binning\", help=\"A multivariate binning probability model-based approach for learning correction values associated with fend characteristics. Learning is accomplished using the Broyden-Fletcher-Goldfarb-Shanno algorithm.\")\n binprob_parser = subparser2.add_parser(\"binning-probability\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a probability model.\")\n binexp_parser = subparser2.add_parser(\"binning-express\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a matrix-balancing approximation.\")\n add_complete_hic_group(prob_parser)\n add_hic_probability_group(prob_parser)\n add_complete_hic_group(exp_parser)\n add_hic_express_group(exp_parser)\n add_complete_hic_group(bin_parser)\n add_hic_binning_group(bin_parser)\n add_complete_hic_group(binprob_parser)\n add_hic_probability_group(binprob_parser)\n add_hic_binning_group(binprob_parser)\n add_complete_hic_group(binexp_parser)\n add_hic_express_group(binexp_parser)\n add_hic_binning_group(binexp_parser)\n return", "def find_add_all(self):\n print(\"start find_add_all\")\n cflib.crtp.init_drivers()\n available = cflib.crtp.scan_interfaces()\n for i in available:\n print \"InterfacewithURI [%s] found, name [%s]\" % (i[0],i[1])\n self.add_uri(i[0])\n print(\"end\")", "def populate_argparse(self, parser, name=None):\n\n for name, container in self.keys.items():\n container.populate_argparse(parser, name=name)", "def run(self):\n self._params = self.parsingcommands()\n self.start()", "def setup_parser_report(subparsers):\r\n subparsers.add_parser('report', help='Freeseer reporting functions')", "def _create_outliners(self):\n\n if not self._registered_outliner_classes:\n LOGGER.warning('No registered outliner classes found!')\n return\n\n for outliner_type, outliner_class in reversed(self._registered_outliner_classes.items()):\n new_outliner = outliner_class(project=self._project)\n self.add_outliner(outliner_type, new_outliner)", "def add_fivecnormalize_subparser(subparsers):\n parser = subparsers.add_parser(\"5c-normalize\", help=\"HiFive 5C Project Normalization Function: Learn correction parameters for a HiFive 5C project.\", epilog=\"For command line options of each normalization approach, type: %(prog)s <ALGORITHM> -h\")\n subparser2 = parser.add_subparsers(dest='algorithm')\n prob_parser = subparser2.add_parser(\"probability\", help=\"A probability model based approach for learning correction values associated with each fragment. Learning is accomplished using gradient descent.\")\n exp_parser = subparser2.add_parser(\"express\", help=\"An appoximation based approach for learning correction values associated with each fragment. Learning is accomplished using a variation of matrix balancing.\")\n bin_parser = subparser2.add_parser(\"binning\", help=\"A multivariate binning probability model-based approach for learning correction values associated with fragment characteristics. 
Learning is accomplished using the Broyden-Fletcher-Goldfarb-Shanno algorithm.\")\n binprob_parser = subparser2.add_parser(\"binning-probability\", help=\"A chained-correction approach first learning fragment characteristic corrections and applying them prior to learning fragment-associated correction values via a probability model.\")\n binexp_parser = subparser2.add_parser(\"binning-express\", help=\"A chained-correction approach first learning fragment characteristic corrections and applying them prior to learning fragment-associated correction values via a matrix-balancing approximation.\")\n probbin_parser = subparser2.add_parser(\"probability-binning\", help=\"A chained-correction approach first learning fragment-associated correction values via a probability model and applying them prior to learning fragment characteristic corrections.\")\n expbin_parser = subparser2.add_parser(\"express-binning\", help=\"A chained-correction approach first learning fragment-associated correction values via a matrix-balancing approximation and applying them prior to learning fragment characteristic corrections.\")\n add_fivec_normalize_group(prob_parser)\n add_fivec_probability_group(prob_parser)\n add_fivec_normalize_group(exp_parser)\n add_fivec_express_group(exp_parser)\n add_fivec_normalize_group(bin_parser)\n add_fivec_binning_group(bin_parser)\n add_fivec_normalize_group(binprob_parser)\n add_fivec_binning_group(binprob_parser)\n add_fivec_probability_group(binprob_parser)\n add_fivec_normalize_group(binexp_parser)\n add_fivec_binning_group(binexp_parser)\n add_fivec_express_group(binexp_parser)\n add_fivec_normalize_group(probbin_parser)\n add_fivec_probability_group(probbin_parser)\n add_fivec_binning_group(probbin_parser)\n add_fivec_normalize_group(expbin_parser)\n add_fivec_express_group(expbin_parser)\n add_fivec_binning_group(expbin_parser)\n return", "def add_commands(parser, subparsers):\n subparser = subparsers.add_parser('libraries', help='search for LogicBlox libraries')\n subparser.set_defaults(func=execute_libraries)\n\n subparser.add_argument('libraries',\n nargs='*',\n help=\"libraries to locate\")\n\n subparser.add_argument('--libpath',\n help=\"library path to search\")\n \n subparser.add_argument('--dependencies', '-d',\n default=False,\n action='store_true',\n help=\"print the libraries upon which a library depends\")\n\n subparser.add_argument('--quiet', '-q',\n default=False,\n action='store_true',\n help=\"do not display any information. 
Used when simply querying the exit code\")\n subparser", "def add_run(self):\n r = _Element('a:r', _nsmap)\n _SubElement(r, 'a:t')\n # work out where to insert it, ahead of a:endParaRPr if there is one\n endParaRPr = _child(self.__p, 'a:endParaRPr')\n if endParaRPr is not None:\n endParaRPr.addprevious(r)\n else:\n self.__p.append(r)\n return Run(r)", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def apply(self):\n \n def apply_extensions(extensions):\n for extension in extensions:\n verify_type_or_subclass(extension, Extension)\n if isclass(extension):\n extension = extension()\n extension.apply_to_phase(self)\n apply_extensions(extension.extensions)\n\n apply_extensions(self.extensions)\n\n for hook in self.hooks:\n hook(self)", "def start_all(self):\n self.process_list = []\n for allocator in self.allocator_list:\n process = Process(target=allocator.start)\n process.start()\n self.process_list.append(process)", "def add_parser_options(parser_addoption, with_defaults: bool = True) -> None:\n parser_addoption(\n \"--tavern-global-cfg\",\n help=\"One or more global configuration files to include in every test\",\n nargs=\"+\",\n )\n parser_addoption(\n \"--tavern-http-backend\",\n help=\"Which http backend to use\",\n default=\"requests\" if with_defaults else None,\n )\n parser_addoption(\n \"--tavern-mqtt-backend\",\n help=\"Which mqtt backend to use\",\n default=\"paho-mqtt\" if with_defaults else None,\n )\n parser_addoption(\n \"--tavern-strict\",\n help=\"Default response matching strictness\",\n default=None,\n nargs=\"+\",\n )\n parser_addoption(\n \"--tavern-use-default-traceback\",\n help=\"Use normal python-style traceback\",\n default=False,\n action=\"store_true\",\n )\n parser_addoption(\n \"--tavern-always-follow-redirects\",\n help=\"Always follow HTTP redirects\",\n default=False,\n action=\"store_true\",\n )\n parser_addoption(\n \"--tavern-file-path-regex\",\n help=\"Regex to search for Tavern YAML test files\",\n default=r\".+\\.tavern\\.ya?ml$\",\n action=\"store\",\n nargs=1,\n )", "def _run_validation_chain(self, validators):\n for validator in validators:\n try:\n validator(self)\n except ValidationStop as e:\n # print('ValidationStop')\n if e.args and e.args[0]:\n self.errors.append(e.args[0])\n break\n except ValueError as e:\n # print('ValueError')\n self.errors.append(e.args[0])\n except Exception as e:\n print(f'Some Exception {e.args[0]}')", "def _import_plugins(self) -> None:\n logger.debug('Importing plugins')\n self._pm = pluggy.PluginManager('sirbot')\n self._pm.add_hookspecs(hookspecs)\n\n for plugin in self.config['sirbot']['plugins']:\n try:\n p = importlib.import_module(plugin)\n except (ModuleNotFoundError, ):\n if os.getcwd() not in sys.path:\n sys.path.append(os.getcwd())\n p = importlib.import_module(plugin)\n else:\n raise\n self._pm.register(p)", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def _run_extra_validators(self, data):\n errors = defaultdict(list)\n for validator in self.get_extra_validators():\n validator.set_instance(self.instance)\n try:\n 
validator(data)\n except ValidationError as exc:\n for field, field_errors in exc.detail.items():\n errors[field] += field_errors\n return errors", "def extended_parse(self):\n\t\t## Do the initial parsing\n\t\tself.parse()\n\n\t\t## First, cycle through the hosts, and append hostgroup information\n\t\tindex = 0\n\t\tfor host in self.data['all_host']:\n\t\t\tif host.has_key('register') and host['register'] == '0': continue\n\t\t\tif not host.has_key('host_name'): continue\n\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t## Append any hostgroups that are directly listed in the host definition\n\t\t\tif host.has_key('hostgroups'):\n\t\t\t\tfor hostgroup_name in self._get_list(host, 'hostgroups'):\n\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\t\t\t\t\tif hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)\n\n\t\t\t## Append any services which reference this host\n\t\t\tservice_list = []\n\t\t\tfor service in self.data['all_service']:\n\t\t\t\tif service.has_key('register') and service['register'] == '0': continue\n\t\t\t\tif not service.has_key('service_description'): continue\n\t\t\t\tif host['host_name'] in self._get_active_hosts(service):\n\t\t\t\t\tservice_list.append(service['service_description'])\n\t\t\tself.data['all_host'][index]['meta']['service_list'] = service_list\n\t\t\t\t\t\n\n\t\t\t## Increment count\n\t\t\tindex += 1\n\n\t\t## Loop through all hostgroups, appending them to their respective hosts\n\t\tfor hostgroup in self.data['all_hostgroup']:\n\n\t\t\tfor member in self._get_list(hostgroup,'members'):\n\t\t\t\tindex = 0\n\t\t\t\tfor host in self.data['all_host']:\n\t\t\t\t\tif not host.has_key('host_name'): continue\n\n\t\t\t\t\t## Skip members that do not match\n\t\t\t\t\tif host['host_name'] == member:\n\n\t\t\t\t\t\t## Create the meta var if it doesn' exist\n\t\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t\t\t\tif hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])\n\n\t\t\t\t\t## Increment count\n\t\t\t\t\tindex += 1\n\n\t\t## Expand service membership\n\t\tindex = 0\n\t\tfor service in self.data['all_service']:\n\t\t\tservice_members = []\n\n\t\t\t## Find a list of hosts to negate from the final list\n\t\t\tself.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)\n\n\t\t\t## Increment count\n\t\t\tindex += 1" ]
[ "0.6505622", "0.641105", "0.62048805", "0.601413", "0.60013145", "0.589798", "0.5883723", "0.585354", "0.5844148", "0.58043873", "0.564003", "0.5558875", "0.55376494", "0.54931134", "0.5423032", "0.53750455", "0.5364569", "0.5346068", "0.53338164", "0.5296625", "0.5261934", "0.52582324", "0.5246025", "0.52292687", "0.5223513", "0.5216324", "0.51815575", "0.5180202", "0.5160334", "0.5150853", "0.5146182", "0.5139852", "0.5129904", "0.51278013", "0.5121203", "0.5119517", "0.5116288", "0.51068026", "0.5105335", "0.5089035", "0.508517", "0.50708926", "0.5068386", "0.5057751", "0.50257367", "0.50246143", "0.49782228", "0.49747896", "0.49304664", "0.49039316", "0.49017584", "0.4887337", "0.48828888", "0.48807922", "0.4877798", "0.48756605", "0.4871298", "0.486942", "0.48688075", "0.48668307", "0.48625177", "0.48462138", "0.4845184", "0.4837395", "0.48334995", "0.48175937", "0.48009565", "0.47974515", "0.47929382", "0.47927833", "0.4772266", "0.4743076", "0.47424674", "0.47389692", "0.4737173", "0.47227368", "0.4713277", "0.47127098", "0.4707758", "0.47061604", "0.46929616", "0.46879598", "0.4687138", "0.46870792", "0.46865988", "0.4685401", "0.4684534", "0.46841383", "0.46838427", "0.4681224", "0.46747822", "0.4672075", "0.46715358", "0.46688297", "0.46676147", "0.4666865", "0.46613368", "0.46561903", "0.46388844", "0.4632418" ]
0.59972507
5
Entrypoint for `rasa run`.
def run(args: argparse.Namespace) -> None:
    import rasa

    args.endpoints = rasa.cli.utils.get_validated_path(
        args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
    )
    args.credentials = rasa.cli.utils.get_validated_path(
        args.credentials, "credentials", DEFAULT_CREDENTIALS_PATH, True
    )

    if args.enable_api:
        if not args.remote_storage:
            args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
        rasa.run(**vars(args))
        return

    # if the API is not enable you cannot start without a model
    # make sure either a model server, a remote storage, or a local model is
    # configured
    import rasa.model
    from rasa.core.utils import AvailableEndpoints

    # start server if remote storage is configured
    if args.remote_storage is not None:
        rasa.run(**vars(args))
        return

    # start server if model server is configured
    endpoints = AvailableEndpoints.read_endpoints(args.endpoints)
    model_server = endpoints.model if endpoints and endpoints.model else None
    if model_server is not None:
        rasa.run(**vars(args))
        return

    # start server if local model found
    args.model = _validate_model_path(args.model, "model", DEFAULT_MODELS_PATH)
    local_model_set = True
    try:
        rasa.model.get_local_model(args.model)
    except ModelNotFound:
        local_model_set = False

    if local_model_set:
        rasa.run(**vars(args))
        return

    rasa.shared.utils.cli.print_error(
        f"No model found. You have three options to provide a model:\n"
        f"1. Configure a model server in the endpoint configuration and provide "
        f"the configuration via '--endpoints'.\n"
        f"2. Specify a remote storage via '--remote-storage' to load the model "
        f"from.\n"
        f"3. Train a model before running the server using `rasa train` and "
        f"use '--model' to provide the model path.\n"
        f"For more information check {DOCS_BASE_URL}/model-storage."
    )
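For context, a minimal sketch of how an entrypoint like the `run(args)` function above is typically attached to an argparse subparser and dispatched via `set_defaults(func=...)` — the same pattern that recurs in the negative examples below. The subcommand name, the reduced argument set, and the default values here are illustrative assumptions, not rasa's exact CLI wiring:

import argparse


def add_run_subparser(subparsers: argparse._SubParsersAction) -> None:
    # Hypothetical, reduced wiring: register a "run" subcommand and point it
    # at the run(args) entrypoint defined above via set_defaults(func=...).
    run_parser = subparsers.add_parser("run", help="Start a server with a trained model.")
    run_parser.add_argument("--model", default="models/")
    run_parser.add_argument("--endpoints", default="endpoints.yml")
    run_parser.add_argument("--credentials", default="credentials.yml")
    run_parser.add_argument("--enable-api", dest="enable_api", action="store_true")
    run_parser.add_argument("--remote-storage", dest="remote_storage", default=None)
    run_parser.set_defaults(func=run)


parser = argparse.ArgumentParser(prog="rasa")
subparsers = parser.add_subparsers(dest="subcommand", required=True)
add_run_subparser(subparsers)

args = parser.parse_args()
args.func(args)  # dispatches to run(args) above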
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_rasa_server(run_event):\n print('START RASA SERVER')\n if os.getenv('RASA_ACTIONS_URL') and len(\n os.getenv('RASA_ACTIONS_URL')) > 0:\n # ensure rasa endpoints file matches RASA_ACTIONS_URL env var\n endpoints_file = open(\n os.path.join(\n os.path.dirname(__file__),\n '../rasa/endpoints.yml'),\n \"r\")\n endpoints = yaml.load(endpoints_file.read(), Loader=yaml.FullLoader)\n endpoints['action_endpoint'] = {\"url\": os.getenv('RASA_ACTIONS_URL')}\n # write updates\n with open(os.path.join(os.path.dirname(__file__), '../rasa/endpoints.yml'), 'w') as outfile:\n yaml.dump(endpoints, outfile, default_flow_style=False)\n\n cmd = ['rasa', 'run', '--enable-api']\n process2 = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n shell=False,\n cwd=os.path.join(\n os.path.dirname(__file__),\n '../rasa'))\n while run_event.is_set():\n time.sleep(1)\n process2.terminate()\n process2.wait()", "def train_rasa():\n print('TRAIN RASA')\n\n if ARGS.generate:\n cmd = ['npx chatito --format rasa data/']\n call(\n cmd,\n shell=True,\n cwd=os.path.join(\n os.path.dirname(__file__),\n '../rasa/chatito'))\n print('CONVERT TO RASA MD')\n convert_training_data(\n data_file=os.path.join(\n os.path.dirname(__file__),\n '../rasa/chatito/rasa_dataset_training.json'),\n out_file=os.path.join(\n os.path.dirname(__file__),\n '../rasa/chatito/nlu.md'),\n output_format=\"md\",\n language=\"\")\n print('DONE CONVERT TO RASA MD')\n\n if ARGS.train:\n train(\n domain=os.path.join(\n os.path.dirname(__file__),\n '../rasa/domain.yml'),\n config=os.path.join(\n os.path.dirname(__file__),\n '../rasa/config.yml'),\n training_files=[\n os.path.join(\n os.path.dirname(__file__),\n '../rasa/data/nlu.md'),\n os.path.join(\n os.path.dirname(__file__),\n '../rasa/data/stories.md'),\n os.path.join(\n os.path.dirname(__file__),\n '../rasa/chatito/nlu.md')],\n output=os.path.join(os.path.dirname(__file__), '../rasa/models')\n )", "def run():\n entry_point.run()", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def entry_point():", "def entry_point():", "def entry_point():", "def main(config: str):\n application = Application(config_path=config)\n application.run()", "def main(args=None):\n app()\n return 0", "def run():\n app.run()", "def main():\n tng.api.runner()", "def main() -> None:\n try:\n config = Config.load_config()\n asyncio.run(App(config=config, no_history=False).run())\n except ClientError:\n raise\n except Exception as e:\n raise Bug(str(e))", "def main():\n import sys\n FILES.extend(sys.argv[1:])\n app.debug = True\n app.run(port=5001, threaded=False)", "def main(_config, _run):\n sacred.commands.print_config(_run)\n dump_config_and_makefile()\n prepare_and_train()", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main() -> None:\r\n parser = argparse.ArgumentParser(description='Recommender Room',\r\n prefix_chars='-')\r\n \r\n parser.add_argument('-v', '--verbose', required=False, action='store_true',\r\n help='display additional information to the terminal')\r\n \r\n parser.add_argument('-o', '--host', required=False, default=rec.HOST,\r\n help='select hostname or IP address for the web 
application')\r\n \r\n parser.add_argument('-p', '--port', type=int, required=False, default=rec.PORT,\r\n help='select port for the web application')\r\n \r\n parser.add_argument('-cfg', '--configuration', dest='config', required=False,\r\n choices=CONFIG_CHOICES, default='debug', \r\n help='set the appropriate configuration settings for the web application')\r\n \r\n args = parser.parse_args() \r\n rec_api.start_server(args)", "def main():\n print(\"def main\")\n return APP.run()", "def rest_main(*args) -> None:\n\n if not args:\n args = sys.argv\n\n parser = argparse.ArgumentParser(args)\n parser.add_argument(\n \"-c\",\n \"--config\",\n type=str, help=\"alternate config file\",\n default=None\n )\n parser.add_argument(\n \"--no-https\",\n help=\"Turn off HTTPS\",\n default=False, action='store_true',\n )\n userargs = parser.parse_args()\n\n config_file = sq_get_config_file(userargs.config)\n app = app_init(config_file)\n cfg = load_sq_config(config_file=config_file)\n try:\n api_key = cfg['rest']['API_KEY']\n except KeyError:\n print('missing API_KEY in config file')\n exit(1)\n\n logcfg, loglevel = get_log_config_level(cfg)\n\n no_https = cfg.get('rest', {}).get('no-https', False) or userargs.no_https\n\n srvr_addr = cfg.get('rest', {}).get('address', '127.0.0.1')\n srvr_port = cfg.get('rest', {}).get('port', 8000)\n\n if no_https:\n uvicorn.run(app, host=srvr_addr, port=srvr_port,\n )\n else:\n ssl_keyfile, ssl_certfile = get_cert_files(cfg)\n uvicorn.run(app, host=srvr_addr, port=srvr_port,\n ssl_keyfile=ssl_keyfile,\n ssl_certfile=ssl_certfile)", "def main():\n CLI_APP.run()", "def main():\n app.run(debug=True)", "def main(argv=None):\n # __package__ should be `development.main`\n run_example_local('examples.run_rl', argv)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'rovername',\n help='required, name of the rover to connect to',\n nargs='?',\n default=None\n )\n args = parser.parse_args()\n\n if args.rovername is None:\n print('usage:', sys.argv[0], '<rovername>')\n exit(-1)\n\n rovername = args.rovername\n swarmie = Swarmie(rovername)\n\n planner = Planner(swarmie, use_rviz_nav_goal=True)\n rospy.spin()", "def main():\n args = utils.parse_arguments()\n logging.basicConfig(level=logging.INFO)\n coloredlogs.install(level=0,\n fmt=\"[%(asctime)s][%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n isatty=True)\n if args.debug:\n l_level = logging.DEBUG\n else:\n l_level = logging.INFO\n\n logging.getLogger(__package__).setLevel(l_level)\n\n LOG.info('RUNNING TAMAGO WEB')\n serve(app, port=8080, host='0.0.0.0')", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if 
args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def main():\n parser = argparse.ArgumentParser(description='Creates a Mist site within your organization')\n parser.add_argument('config', metavar='config_file', type=argparse.FileType(\n 'r'), help='file containing all the configuration information')\n args = parser.parse_args()\n configs = json.load(args.config)\n\n claim_ap(configs)", "def run():\n\n args = parse_arguments()\n app = rummage_app.RummageApp(args)\n app.MainLoop()\n\n return 0", "def entry_point():\n pass", "def run():\n main()", "def main():\n app = App()\n app.run()", "def main():\n app = RunSnakeRunApp(0)\n app.MainLoop()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main():\n sdoc_application = SDocApplication()\n sdoc_application.run()", "def main():\n cli = CommandLineInterface(NAME, package=\"nemo_nowcast\", description=__doc__)\n cli.build_parser()\n parsed_args = cli.parser.parse_args()\n config = Config()\n config.load(parsed_args.config_file)\n msg = _configure_logging(config)\n logger.info(f\"running in process {os.getpid()}\")\n logger.info(f\"read config from {config.file}\")\n logger.info(msg)\n run(config)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n return run_server(**parse_server_args())", "def main():\n my_emr = EmrProcessing()\n\n if \"-s\" in sys.argv:\n my_emr.verbose_mode = False\n else:\n my_emr.verbose_mode = True\n print \"\\nStarting Titanic Data Analysis\"\n my_emr.parse_user_selections()\n\n # Setup\n my_emr.clear_local_output_directory()\n my_emr.update_mapper_file(\"model2\")\n\n # S3 activities\n my_emr.empty_bucket()\n my_emr.create_and_fill_bucket()\n\n # EMR activities\n my_emr.setup_and_run_job()\n my_emr.wait_until_job_completes()\n\n # Cleanup\n my_emr.download_output_files()\n my_emr.post_process_output_file()\n if my_emr.verbose_mode:\n my_emr.print_local_output_files_stats()", "def entry_point() -> None:\n args = parse_args()\n print(hello(args.name))", "def main(args):", "def main(args):", "def main(\n host: str = typer.Option(\"127.0.0.1\", help=\"IP to run the API on\"),\n port: int = typer.Option(8000, help=\"Port to run the API on\"),\n):\n typer.echo(\"🦄 Starting with uvicorn...\")\n typer.echo(\n \"💡 Check out the API docs at \"\n + typer.style(f\"http://{host}:{port}/docs\", bold=True)\n )\n typer.echo(\"-\" * 80)\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)", "def main(run_app, config_filename=None):\n\n if not config_filename:\n parser = ThrowingArgumentParser(description='Provide a RESTful API service from the order database.')\n parser.add_argument('config_file', help='JSON configuration file', type=argparse.FileType('r'))\n args = parser.parse_args()\n\n app = create_app(args.config_file.name)\n\n else:\n app = create_app(config_filename)\n\n if 'flask-debug' in app.config:\n do_debug = app.config['flask-debug']\n else:\n do_debug = False\n\n if run_app:\n if app.config['listening_ip'] and app.config['listening_port']:\n app.run(host=app.config['listening_ip'], port=app.config['listening_port'], debug=do_debug)\n else:\n # expect a nginx environment\n app.run(debug=do_debug)", "def main():\n options = lib.main.parse_args()\n\n #Initialize all the loggings with the options specified.\n 
lib.main.logs_initialization(options)\n logging.debug(\"Logs are now enabled and working\")\n\n #Update the main config file with the app information.\n logging.debug(\"Updating parameters on config files\")\n lib.config.update_params()\n\n # Finally, when all the initialization schedule is completed, Flask\n # will start.\n logging.debug(\"Calling Flask initializator function\")\n api.start(options[\"debug\"])", "def entry_point():\n\n\n plac.call(main)", "def main(args=None):", "def main(args=None):", "def create_and_run():\n\n app = App()\n app.run()", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def entry_point() -> int:\n return run(argv=sys.argv[1:], stdout=sys.stdout, stderr=sys.stderr)", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def main():\n greetings()\n run_jarvis()", "def main():\n logger.info(\"Running main function...\")\n args = get_args()\n # rose suite default location\n if args.suite_dir:\n default_suite = args.suite_dir\n rose_config_template = os.path.join(default_suite, \"rose-suite.conf\")\n\n # get command line arguments\n recipe_files = args.recipe_files\n config_file = args.config_file\n main_dir = args.main_dir\n log_level = args.log_level\n\n # setup rose suite\n run_rose = _setup_work(rose_config_template, recipe_files,\n config_file, main_dir, default_suite, log_level)\n\n # submit to cylc\n if not args.no_submit:\n _run_suite(run_rose)", "def main(mode, discord_token, discord_channel, asana_token, asana_workspace, sentry_url):\n \n logger = logger_pick(mode, sentry_url) \n linker = Linker() # A class that links the discord bot with the asana listeners\n\n # Start project_starter thread\n project_starter_thread = threading.Thread(\n target=project_starter,\n kwargs={\n 'linker': linker,\n 'logger': logger,\n 'asana_token': asana_token,\n 'asana_workspace': asana_workspace\n }\n )\n project_starter_thread.start()\n\n # Start AsanaBot thread\n bot_thread = threading.Thread(\n target=retry_wrapper,\n kwargs={\n 'target': AsanaBot.start,\n 'target_type': \"bot\",\n 'linker': linker,\n 'logger': logger,\n 'discord_token': discord_token,\n 'discord_channel': discord_channel,\n 'asana_token': asana_token,\n 'asana_workspace': asana_workspace\n }\n )\n bot_thread.start()\n\n logger.info('All the threads are running.')", "def run():\n # main(sys.argv[1:])\n main()", "def run():\n REDIRECT = False\n LOG_FILE = \"truss.log\"\n app = App(REDIRECT)\n app.MainLoop()", "def run_main():\n main(sys.argv)", "def run(self, args: argparse.Namespace) -> None:\n pass", "def runserver(args):\n elmrapp.run()\n return \"\"", "def main(args=None):\n pass", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def train_entry_point():", "def start():\n trio.run(_main)", "def runner() -> None:\n # obtain our configuration from the environment\n config = from_environment(EXPECTED_CONFIG)\n # configure logging for the application\n log_level = getattr(logging, str(config[\"LOG_LEVEL\"]).upper())\n logging.basicConfig(\n format=\"{asctime} [{threadName}] {levelname:5} ({filename}:{lineno}) - {message}\",\n level=log_level,\n stream=sys.stdout,\n style=\"{\",\n )\n # create our Bundler service\n bundler = Bundler(config, LOG) # type: ignore[arg-type]\n # let's get to work\n bundler.logger.info(\"Adding 
tasks to asyncio loop\")\n loop = asyncio.get_event_loop()\n loop.create_task(work_loop(bundler))", "def run():\n app.run(debug=True, port=5001)", "def main(call_args):\n\n args = parse_args(call_args)\n init_logging(args.loglevel)\n\n _log.info(\"'scrape-jobs' called with args: %s\", args)\n runner.run_with_config_file(args.site, args.config_file)", "def main():\n args = parse_args()\n process_args(args)", "def main():\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(levelname)s:%(module)s:%(filename)s:%(lineno)s:%(message)s\",\n )\n\n logger = logging.getLogger(__name__)\n\n args = _parse_args()\n config = args.config\n\n start_time = datetime.datetime.utcnow()\n logger.info(\n json.dumps(\n {\n \"msg\": \"Starting BaseballClerk.\",\n \"subreddits\": list(config[\"subreddits\"].keys()),\n \"start_time\": start_time.isoformat(),\n }\n )\n )\n\n # Connect the datastore and create tables if not existing.\n datastore.connect(\"BaseballClerk.db\")\n EVENTS.create_if_needed()\n COMMENTS.create_if_needed()\n\n for game_thread in baseballbot.active_game_threads():\n subreddit_config = config[\"subreddits\"].get(\n game_thread[\"subreddit\"][\"name\"]\n ) # type: dict\n if not subreddit_config:\n continue\n\n logger.info(\n json.dumps(\n {\n \"msg\": \"Running game thread.\",\n \"subreddit\": game_thread[\"subreddit\"][\"name\"],\n \"game_pk\": game_thread[\"gamePk\"],\n }\n )\n )\n\n reddit = praw.Reddit(subreddit_config[\"praw_bot\"])\n\n game_pk = game_thread[\"gamePk\"]\n gamechat = reddit.submission(game_thread[\"postId\"])\n\n play_by_play(game_pk, gamechat)\n exit_velocities(game_pk, gamechat)\n due_up(game_pk, gamechat)\n\n time.sleep(2)\n\n for subreddit_config in config[\"subreddits\"].values():\n praw_bot = subreddit_config[\"praw_bot\"]\n reddit = praw.Reddit(praw_bot)\n\n logger.info(\n json.dumps(\n {\"msg\": \"Running replies.\", \"subreddit\": subreddit_config[\"name\"]}\n )\n )\n\n for item in reddit.inbox.unread():\n # Make sure it is fresh.\n created_utc = datetime.datetime.fromtimestamp(item.created_utc)\n if (datetime.datetime.utcnow() - created_utc).seconds > 600:\n item.mark_read()\n continue\n\n if isinstance(item, Comment) and praw_bot.lower() in item.body.lower():\n key = f\"textface-{item.id}\"\n cmnt = comment.default_mention_reply(\n item, subreddit_config[\"default_replies\"]\n )\n COMMENTS[key] = cmnt\n\n item.mark_read() # Keep the inbox clean.\n\n end_time = datetime.datetime.utcnow()\n elapsed = (end_time - start_time).total_seconds()\n logger.info(\n json.dumps(\n {\n \"msg\": \"Finished BaseballClerk.\",\n \"subreddits\": list(config[\"subreddits\"].keys()),\n \"start_time\": start_time.isoformat(),\n \"end_time\": end_time.isoformat(),\n \"elapsed\": elapsed,\n }\n )\n )", "def startapp():", "def main():\n docopt = docoptinit(__doc__)\n logging.basicConfig(level=logging.INFO,\n format='[%(asctime)s] [%(levelname)s] [ %(filename)s:%(lineno)s - %(name)s ] %(message)s ')\n logging.info('basic config')\n # qb.set_logger(__file__, debug=docopt['--debug'])\n host = docopt['--host']\n port = int(docopt['--port'])\n if not (1 <= port <= 65535):\n raise Exception('port must be 1-65535')\n\n global verbose\n verbose = int(docopt['--verbose'])\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(start_warp_server(host, port))\n loop.run_forever()\n except OSError:\n pass\n except KeyboardInterrupt:\n print('bye')\n finally:\n loop.close()", "def main():\n\n r = Reddit('{} Discord Announcer v1.0 - /u/{}'.format(settings.NETWORK_NAME, 
settings.USERNAME))\n r.oauth()\n\n b = DiscordBot(r)\n b.run()", "def main(argv: Optional[List[str]] = None) -> None: # pragma: no cover, proxy\n if argv is None:\n argv = sys.argv[1:]\n\n app = reducto.Reducto()\n app.run(argv)", "def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()", "def start(builder):\n global rss_builder\n rss_builder, sys.argv = builder, []\n app.run()", "def run(self, args):\n pass", "def main():\n\n run_lap()", "def main(args=None):\n\n program = Program(\n name='Ansible Customer Invoke taks to run \"ansible-galaxy\" commands',\n namespace=Collection.from_module(ansible_galaxy_tasks),\n version='0.1.0-alpha+001')\n\n program.run(args)", "def main(args: argparse.Namespace, config: Config) -> None:\n # Notes:\n # - 1878 is the number of unique answers from the GQA paper\n # - 1843 is the number of answers across train, val and testdev\n\n # Download and initialise resources\n print(colored(\"initialisation:\", attrs=[\"bold\"]))\n stanza.download(lang=\"en\", dir=\".stanza\")\n\n # Print environment info\n print(colored(\"environment:\", attrs=[\"bold\"]))\n cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n print(f\"device: {torch.cuda.get_device_name(device) if cuda else 'CPU'}\")\n print(config)\n\n if args.job == JobType.PREPROCESS:\n preprocess(config)\n elif args.job in (JobType.TRAIN, JobType.PREDICT):\n resume = None\n if args.resume != \"\":\n run_id, checkpoint = args.resume.split(\":\")\n resume = ResumeInfo(run_id, checkpoint)\n if args.job == JobType.TRAIN:\n train(config, device, resume)\n else:\n predict(config, device, resume)\n else:\n raise NotImplementedError()", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def run(self, args):\n\n return", "def main():\n\ttoken = os.getenv(\"BOT_TOKEN\")\n\tapplication = Application.builder().token(token).read_timeout(30).write_timeout(30).build()\n\tload_interactions(application)\n\tprint(\"Simple Media Converter instance started!\")\n\tapplication.run_polling()", "def main():\n\n\n\n\tdaemon = ORsched(scheduler_config.pidfile, stdout=scheduler_config.outstream, stderr=scheduler_config.outstream)\n\ttry:\n\t\topts, list = getopt.getopt(sys.argv[1:], 'st')\n\texcept getopt.GetoptError, e:\n\t\tprint(\"Bad options provided!\")\n\t\tsys.exit()\n\n\tfor opt, a in opts:\n\t\tif opt == \"-s\":\n\t\t\ttry:\n\t\t\t\tpid_number = open(scheduler_config.pidfile,'r').readline()\n\t\t\t\tif pid_number:\n \t\t\t\tsys.exit('Daemon is already running!')\n \t\texcept Exception, e:\n \t\t\tpass\n\n\t\t\tprint(\"Starting daemon...!\")\n\t\t\tdaemon.start()\n\t\telif opt == \"-t\":\n\t\t\tdaemon.stop()\n\t\t\tprint \"The daemon is stoped!\"\n\t\telse:\n\t\t\tprint(\"Option %s not supported!\" % (opt))", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('-1',\n dest='r1',\n help='Path to first fasta file.')\n\n parser.add_argument('-2',\n dest='r2',\n help='Path to paired fasta file.')\n\n args = parser.parse_args()\n salmon(args.r1, args.r2)", "def main():\n # get the params in format\n params = {key: value for key, value in demisto.params().items() if value is not None}\n\n LOG(f'Command being called is {demisto.command()}')\n try:\n 
if params.get('initial_interval') and int(params.get('initial_interval')) > 7: # type: ignore\n raise ValueError(\n f\"Retroactive timeline should be within 7 days, given value: {params.get('initial_interval')}\")\n\n client = Client(params)\n args = demisto.args()\n\n if demisto.command() == 'test-module':\n if not args.get('collection', False):\n args['collection'] = params.get('collection', '')\n return_results(get_test_response(client, args))\n\n elif demisto.command() == 'fetch-indicators':\n # fetch indicators using taxii service\n indicators = fetch_indicators(client)\n # we submit the indicators in batches\n for b in batch(indicators, batch_size=2000):\n demisto.createIndicators(b)\n\n elif demisto.command() == 'cyble-vision-fetch-taxii':\n # fetch indicators using taxii service\n validate_input(args)\n return_results(cyble_fetch_taxii(client, args))\n\n elif demisto.command() == 'cyble-vision-get-collection-names':\n # fetch collections using taxii service\n return_results(get_feed_collection(client))\n\n # Log exceptions\n except Exception as e:\n return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')", "def main():\n parser = argparse.ArgumentParser(\n description='A testbench for the Google Cloud C++ Client Library')\n parser.add_argument('--host', default='localhost',\n help='The listening port')\n parser.add_argument('--port', help='The listening port')\n # By default we do not turn on the debugging. This typically runs inside a\n # Docker image, with a uid that has not entry in /etc/passwd, and the\n # werkzeug debugger crashes in that environment (as it should probably).\n parser.add_argument('--debug', help='Use the WSGI debugger',\n default=False, action='store_true')\n arguments = parser.parse_args()\n\n # Compose the different WSGI applications.\n application = wsgi.DispatcherMiddleware(root, {\n '/httpbin': httpbin.app,\n GCS_HANDLER_PATH: gcs,\n UPLOAD_HANDLER_PATH: upload,\n })\n serving.run_simple(arguments.host, int(arguments.port), application,\n use_reloader=True, use_debugger=arguments.debug,\n use_evalex=True)", "def main(debug=True):\n app.debug = debug\n app.run(host='0.0.0.0')", "def start():\n app.run()", "def _main():\n parser = _create_parser()\n args = parser.parse_args()\n\n if args.interval is None:\n args.interval = 10\n\n if args.what_if is None:\n args.what_if = False\n\n loop = asyncio.get_event_loop()\n\n params = {\n \"connection_string\": args.connection_string,\n \"name\": args.name,\n \"interval\": args.interval,\n \"what_if\": args.what_if\n }\n\n loop.run_until_complete(_run(params))", "def quickstart(*args, **kwargs):\n\n setup(*args, **kwargs)\n update_site(*args, **kwargs)\n restart_site(*args, **kwargs)", "def main(args):\n run_tracks.append({\n 'file': args.file,\n 'test': args.test,\n 'verbose': args.verbose,\n })" ]
[ "0.75828344", "0.6596486", "0.6327994", "0.6202171", "0.6101385", "0.6101385", "0.6101385", "0.60364085", "0.6035912", "0.603584", "0.603292", "0.6017248", "0.599784", "0.5949028", "0.5909721", "0.5909721", "0.5909721", "0.5909721", "0.5909721", "0.5909721", "0.5909721", "0.5909721", "0.5884335", "0.58633476", "0.5854781", "0.5852969", "0.5850719", "0.58491945", "0.58389074", "0.5835291", "0.58284414", "0.58247215", "0.5824568", "0.5811274", "0.58103293", "0.5809378", "0.5809099", "0.5779153", "0.5779153", "0.5773938", "0.5769941", "0.57540053", "0.57540053", "0.5749754", "0.57094055", "0.56993", "0.5698433", "0.5698433", "0.56962043", "0.5691249", "0.56790334", "0.56732106", "0.56675047", "0.56675047", "0.5657681", "0.56533647", "0.5619232", "0.56087476", "0.560188", "0.5587927", "0.5585133", "0.558024", "0.55752856", "0.5553645", "0.55494845", "0.5547475", "0.55433756", "0.5537554", "0.5533992", "0.55321264", "0.5530243", "0.55294424", "0.5529176", "0.5528377", "0.55143785", "0.5509222", "0.55084467", "0.5490997", "0.5485338", "0.54812557", "0.5477971", "0.54751575", "0.54692274", "0.54649353", "0.54645896", "0.54621154", "0.54600745", "0.54393345", "0.54393345", "0.5418717", "0.5416405", "0.5411685", "0.5410989", "0.5395075", "0.53919446", "0.53890014", "0.5388121", "0.5384823", "0.5376433", "0.5372062" ]
0.6522449
2
return ngrams as single space-separated strings with their occurrences
def get(self, n):
    parts = [' '.join(g) for g in ngrams(self.words, n)]
    return get_occurences(parts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ngrams(self, string_):\n def find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n ngrams = []\n tokens = string_.split()\n\n for size in range(1, self._ngram_range + 1):\n tuples = find_ngrams(tokens, size)\n concatenated = [\"_\".join(tuple_) for tuple_ in tuples]\n ngrams.extend(concatenated)\n\n return \" \".join(ngrams)", "def n_grama(sentence, n):\n t = tokenize(sentence)\n n_grams = zip(*[t[i:] for i in range(n)])\n return list(map(lambda n_gram: ' '.join(n_gram), n_grams))", "def ngrams(text, n):\n grams = zip(*[text[i:] for i in range(n)])\n return [''.join(gram) for gram in grams]", "def ngrams(self):\n return self.__ngrams(())", "def get_ngrams(seq, n):\n return", "def n_gram(list, n):\n ngrams = zip(*[list[i:] for i in range(n)])\n return [\" \".join(ngram) for ngram in ngrams]", "def ngrams(word, n):\n word = list(word)\n # insert extra tokens\n word.insert(0, '$')\n word.append('$')\n\n output = []\n for i in range(len(word) - n + 1):\n # print(i)\n # print(word[i:i + n])\n output.append(''.join(word[i:i + n]))\n return output", "def ngrams(words, n=1):\n return [tuple(words[j:j + n]) for j in range(len(words) - n + 1)]", "def get_ngrams(s, ngram_range=1):\n # tokens = s.split()\n # return filter(lambda token: len(token)>1, tokens)\n # return bigrams(s.split()) # NLTK bigrams method\n words = s.split()\n return [' '.join(words[i:i+ngram_range]) for i in range(len(words)-1)]", "def word2ngrams(text, n=3):\n return [text[i:i+n] for i in range(len(text)-n+1)]", "def ngrams(name_string, n=3):\n\n string = re.sub(r'[,-./]|\\sBD', r'', name_string)\n n_grams = zip(*[string[i:] for i in range(n)])\n return [''.join(n_gram) for n_gram in n_grams]", "def ngrams_(text, n):\n return zip(*[text[i:] for i in range(n)])", "def word_ngrams(sent, n):\n\t\n\t# EXAMPLES \n\t# > word_ngrams(tokenize('hello world'), 1)\n\t# [('hello',), ('world',)]\n\t# > word_ngrams(tokenize('hello world'), 2)\n\t# [('<s>', 'hello'), ('hello', 'world'), ('world', '</s>')]\n\n\t# YOUR CODE HERE\n\ttokenized_sent = tokenize(sent)\n\tif n != 1:\n\t\ttokenized_sent.insert(0, '<s>')\n\t\ttokenized_sent.append('</s>')\n\treturn [tuple(tokenized_sent[i:i + n]) for i in range(0, len(tokenized_sent)-n+1)]", "def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams", "def sent2ngrams(text, n=3):\n if n == \"word\":\n return text.split()\n return list(chain(*[word2ngrams(i,n) for i in text.lower().split()]))", "def ngrams(text, n):\n return chain(*[ngrams_(text, i) for i in range(n + 1)])", "def ngramify(word: str,\n max_ngram_size: int):\n chars = list(word)\n ngram_tuples = []\n ngrams = []\n # Take all n-grams with length in [2, max_ngram_size]\n for i in range(2, max_ngram_size + 1):\n pw = list(pairwise(chars, i))\n ngram_tuples += pw\n for e in ngram_tuples:\n if None not in e:\n ngrams.append(''.join(e))\n # Append the list of characters, to avoid an unnecessary call for pairwise(_, 1).\n return list(word) + ngrams", "def get_ngrams(tokens, min_n, max_n):\n all_ngrams = list()\n n_tokens = len(tokens)\n for i in range(n_tokens):\n for j in range(i + min_n, min(n_tokens, i + max_n) + 1):\n all_ngrams.append(\" \".join(tokens[i:j]))\n return all_ngrams", "def ngramize(items: List[str], ngram_range=(1, 1)) -> Generator[List[str], Any, None]:\n\n ngrams = []\n ranges = [(0, i) for i in range(ngram_range[0], ngram_range[1] + 1)]\n for r in ranges:\n ngrams += list(zip(*[items[j:] for j in range(*r)]))\n\n 
formatted_ngrams = [' '.join(item) for item in ngrams]\n\n yield formatted_ngrams", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def n_grams(tokens, n):\n return zip(*[tokens[i:] for i in range(n)])", "def build_ngrams(tokens, n=2):\n ngrams = zip(*(islice(group, idx, None) for idx, group in enumerate(tee(tokens, n))))\n return ngrams", "def extract_n_grams(string, n, overlap=0, unique=True):\n if n > len(string):\n return [] \n\n n_grams = []\n step = n - overlap\n n_grams = [string[i:i+n] for i in range(0, len(string)-step+1, step)]\n\n if unique:\n n_grams = list(set(n_grams))\n\n n_grams = [g+\"\\n\" for g in n_grams]\n\n return n_grams", "def ngrams(sequence, n):\n # credit: http://stackoverflow.com/questions/2380394/simple-implementation-of-n-gram-tf-idf-and-cosine-similarity-in-python\n sequence = list(sequence)\n count = max(0, len(sequence) - n + 1)\n return [tuple(sequence[i:i+n]) for i in range(count)]", "def get_grams(candidate, n):\n words = candidate.split(' ')\n # print(words)\n grams = list()\n for i in range(len(words) - n + 1):\n # print(words[i:i+n])\n grams.append(' '.join(words[i:i+n]))\n return grams", "def ngrams(word, size):\n expanded = \"^\" + word + \"$\"\n for start in range(len(expanded) - size + 1):\n yield expanded[start:start + size]", "def ngramify(corpus, n):\r\n unlist = 0\r\n if type(corpus[0]) is not list:\r\n corpus = [corpus]\r\n unlist = 1\r\n new_corpus = []\r\n for line in corpus:\r\n new_line = []\r\n for gram in range(len(line) - n + 1):\r\n new_gram = \"\"\r\n for i in range(n):\r\n if i != 0:\r\n new_gram += \" \"\r\n new_gram += line[gram + i]\r\n new_line.append(new_gram)\r\n new_corpus.append(new_line)\r\n if unlist:\r\n return new_corpus[0]\r\n return new_corpus", "def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(True)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams", "def create_ngrams(self, tokens):\n ngrams = []\n for i in 
range(len(tokens)- self.N + 1):\n ngrams.append(tuple(tokens[i:i+self.N]))\n return ngrams", "def _generateNgrams(self,text,n=2):\n token = Utilities.CVTokeniser(text)\n # token = nltk.word_tokenize(text)\n computedNgrams=ngrams(token,n)\n return Counter(computedNgrams)", "def transform_ngrams(self, words):\n return words if self.n_grams == 1 else [self.connector.join(words[i:i + self.n_grams]) for i in range(len(words) - self.n_grams + 1)]", "def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):\n def _skip(gram):\n if not filter_fn:\n return False\n return filter_fn(gram)\n\n words = self.words(uncased)\n ngrams = [(s, e + 1)\n for s in range(len(words))\n for e in range(s, min(s + n, len(words)))\n if not _skip(words[s:e + 1])]\n\n # Concatenate into strings\n if as_strings:\n ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]\n\n return ngrams", "def get_ngrams(s, ngmin=1, ngmax=1, tokenizer=list, separator=\"|\"):\n ngrams = [[] for x in range(ngmin, ngmax + 1)]\n s = tokenizer(s)\n for i, ch in enumerate(s):\n for ngsize in range(ngmin, ngmax + 1):\n if (i + ngsize) <= len(s):\n ngrams[ngsize - 1].append(separator.join(s[i:i+ngsize]))\n return ngrams", "def _create_ngrams(tokens, n):\n\n ngrams = collections.Counter()\n for ngram in (tuple(tokens[i:i + n]) for i in xrange(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams", "def n_grams(tokens, n=1):\n shiftToken = lambda i: (el for j,el in enumerate(tokens) if j>=i)\n shiftedTokens = (shiftToken(i) for i in range(n))\n tupleNGrams = zip(*shiftedTokens)\n return tupleNGrams", "def __makeNgrams(self, n):\n # start_time = time.time()\n ngrams = dict()\n itergrams = dict()\n\n for k in range(2,n+1):\n itergrams[k] = list(nltk.ngrams(self.words, k))\n\n for k, grams in itergrams.items():\n kgrams = defaultdict(Counter)\n for gram in grams: \n kgram = list(gram)\n key = ' '.join(kgram[:k-1])\n kgrams[key].update({kgram[-1]})\n ngrams[k] = kgrams\n # print ('finish gen ', k, 'grams at ', time.time()-start_time)\n return ngrams", "def ngrams(self):\n return self.root.ngrams()", "def find_ngrams(self, n):\n\n output = {}\n\n for i in range(len(self.text)-n+1):\n s = ' '.join(self.text[i:i+n])\n # if s is not already in dictionary, set value to 0\n output.setdefault(s, 0)\n output[s] += 1\n return output", "def compute_ngrams(str, n=2):\n #split the string\n st = tuple(str.split())\n #not list of list but list of tuple..\n gram = [st[x:x+n] for x in range(len(st)-n+1)]\n dgram = {}\n #convert ngram into dictionary:\n for key in gram:\n dgram.setdefault(key[0],[]).append(key[1:])\n return dgram", "def get_ngrams(sequence, n):\n length = len(sequence)\n #if only require 1-gram, then we need to add one START and one END to the sequence. 
\n if n==1 or n==2:\n sequence=[\"START\"]*n+sequence+[\"STOP\"]\n end = n+1 #end i means that when n==1, we need to read one more data, that is to the end of sequence, which is slightly different from when n>1.\n #if require multi-grams, use the common calculation below.\n else:\n sequence = [\"START\"]*(n-1)+sequence+[\"STOP\"]\n end = 1\n if n==2:\n end = n\n result = []\n temp = ()\n #the process to construct the tuple-based array.\n for i in range(0,length+end):\n temp = tuple(sequence[i:i+n])\n\n result.append(temp)\n return result", "def find_all_ngrams(dataset, n):\n return zip(*[dataset[i:] for i in xrange(n)])", "def iter_ngrams(self, sentence, n):\n return [tuple(sentence[i : i+n]) for i in range(len(sentence)-n+1)]", "def ngram(self,phrase,n,unity=\"w\"):\n return self._support.ngram(phrase,n,unity)", "def get_all_ngrams():\n grams = ()\n for i in range(0, 40):\n text_i = read_file(str(i))\n curr_grams = ngramize(text_i, n)\n grams = chain(grams, curr_grams)\n return grams", "def n_grammize(role):\n ngrams = []\n if isinstance(role,str):\n role = role.lower()\n role = role.split()\n if len(role)>2:\n for i in range(2, len(role)):\n ngrams.append((role[i-2], role[i-1], role[i]))\n if len(role)>1:\n for i in range(1, len(role)):\n ngrams.append((role[i-1], role[i]))\n for i in range(len(role)):\n ngrams.append(role[i])\n return ngrams", "def __tagsToNgrams__(self):\n bigrams = defaultdict(int)\n trigrams = defaultdict(int)\n for tags in self.getTags():\n tags = list(tags)\n for i in range(2):\n tags.insert(0, BEGIN)\n for k in range(2, len(tags)):\n trigrams[tuple(tags[k-2:k+1])] += 1\n bigrams[tuple(tags[k-1:k+1])] += 1\n return bigrams, trigrams", "def find_ngrams(input_list, n=3):\n return zip(*[input_list[i:] for i in range(n)])", "def create_ngrams(word_list, n):\n yield zip(*[word_list[i:] for i in range(n)])", "def _make_ngrams(self, tokenized_corpus: list, corpus_token_index: int, n: int, make_pre_word_ngram=False):\n ngrams = []\n end_i = 0 if make_pre_word_ngram else -1\n for i in range(n - 1, end_i, -1):\n ngrams.append(tokenized_corpus[corpus_token_index - i])\n return tuple(ngrams)", "def make_ngrams(texts,n,ngram_mod):\r\n return [turnmod(doc,n,ngram_mod) for doc in texts]", "def get_ngrams(sequence, n):\n\n # Set prefix and postfix\n prefix = \"START\"\n postfix = \"STOP\"\n # Append 1 prefix regardless of value of n\n sequence.insert(0, prefix)\n for i in range(n - 2) :\n sequence.insert(0, prefix)\n sequence.append(postfix)\n\n # Check if 1 <= n <= len(sequence)\n if (n > len(sequence)) :\n print(sequence)\n assert 1 <= n\n assert n <= len(sequence)\n\n # Create n-gram sequence\n n_gram_sequence = []\n for i in range(n, len(sequence) + 1) :\n temp_sequence = []\n for j in range(i - n, i) :\n temp_sequence.append(sequence[j])\n n_gram_sequence.append(tuple(temp_sequence))\n return n_gram_sequence", "def full_ngrams(items, n):\n ngs = {}\n for i in xrange(1, n+1):\n ngs.update(gen_ngrams(items, i))\n return ngs", "def sent_to_n_grams(sent: str, n: int) -> Iterator[Tuple[str, ...]]:\n\n words = [word for word in sent.split() if word not in string.punctuation]\n\n rows = [words[i:] for i in range(n)]\n\n return zip(*rows)", "def get_ngrams(self, n = None):\n if not n:\n n = self.N\n\n temp = []\n for sent in self.get_sentences():\n temp.extend(list(sent))\n\n return nltk.ngrams(temp, n)", "def ngrams(iterable, n=1):\n return zip(*(iterable[i:] for i in range(n)))", "def get_ngrams(self, s, ngmin, ngmax, separator=\"\",\n bos=\"<\", eos=\">\", suffix=\"\", 
flatten=True):\n\n # return a single dummy feature if there are no applicable ngrams\n # probably resulting in a mojority-class classifier\n if ngmax == 0 or (ngmax - ngmin < 0) :\n return ['__dummy__']\n\n ngrams = [[] for x in range(1, ngmax + 1)]\n s = [bos] + s + [eos]\n for i, ch in enumerate(s):\n for ngsize in range(ngmin, ngmax + 1):\n if (i + ngsize) <= len(s):\n ngrams[ngsize - 1].append(\n separator.join(s[i:i+ngsize]) + suffix)\n if flatten:\n ngrams = [ng for nglist in ngrams for ng in nglist]\n return ngrams", "def ngrammer(text, gramsize = 3, threshold = 4):\n # we need to import this in order to find the duplicates:\n import nltk\n from nltk.util import ngrams\n from collections import defaultdict\n # get ngrams of gramsize \n if type(text) != list:\n text = tokenised = nltk.word_tokenize(text)\n text = [token for token in text if token.isalnum()]\n # get ngrams of gramsize \n raw_grams = ngrams(text, gramsize)\n \n # a subdefinition to get duplicate lists in a list\n def list_duplicates(seq):\n tally = defaultdict(list)\n for i,item in enumerate(seq):\n tally[item].append(i)\n # return to us the index and the ngram itself:\n return ((len(locs),key) for key,locs in tally.items() \n if len(locs) > threshold)\n\n # use our duplication detector to find duplicates\n dupes = list_duplicates(raw_grams)\n # return them, sorted by most frequent\n return sorted(dupes, reverse = True)", "def extract_char_ngrams(s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])", "def get_ngrams(sequence, n):\n # Example: n = 3, length = 4\n # we want 0:3, 1:4\n\n result = []\n if n == 1:\n result.append(tuple([\"START\"]))\n result.append(tuple([\"STOP\"]))\n\n if n == 3 and len(sequence) == 1:\n result.append(tuple([\"START\",\"START\",sequence[0]]))\n result.append(tuple([\"START\",sequence[0],\"STOP\"]))\n return result\n\n if n > 1 :\n for i in range(n-1):\n result.append(tuple([\"START\"]*(n-1-i)+ sequence[:i+1]))\n result.append(tuple(sequence[-n+1:] + [\"STOP\"]))\n\n for i in range(len(sequence)-n+1):\n result.append(tuple(sequence[i:i+n]))\n return result", "def everygrams(seq):\n for n in range(1, len(seq) + 1):\n for ng in nltk.util.ngrams(seq, n):\n yield ng", "def word_to_ngrams(self, word):\n encoding = list()\n n = self.n\n if word == self.eos or word == self.sos:\n encoding.append(self.ngram_to_id[word])\n else:\n _word = '^' + word + '$'\n for i in range(len(_word) - n + 1):\n ngram = _word[i:i + n]\n if ngram in self.ngram_to_id:\n encoding.append(self.ngram_to_id[ngram])\n else:\n for ch in ngram:\n flag = 1\n if ch in self.unk_char_list:\n flag = random.randint(0, 1)\n if ch in self.ngram_to_id and flag == 1:\n encoding.append(self.ngram_to_id[ch])\n else:\n encoding.append(self.ngram_to_id['<unk>'])\n return encoding", "def get_ngram(n, sentence):\n if n == 1:\n return sentence\n \n # create phrases model to find words and ngrams that occur at least once\n ngram = Phraser(Phrases(sentence, min_count=1, threshold=1))\n\n # for bigrams and higher grams\n for i in range(3,n):\n ngram = Phraser(Phrases(ngram[sentence], min_count=1, threshold=1))\n return ngram[sentence]", "def ngrams(tokens,lang):\n stopwords = stops.stopwords[lang]\n max = len(tokens)\n ngrams = []\n left_punctuation = '!\"%&\\'()*+,-./:;<=>?[\\\\]^_`{|}~'\n\n for i in range(1,max):\n for j in xrange(0,len(tokens)-(i-1)):\n if __check_features(tokens[j:j+i],stopwords):\n ng_str = \" \".join(tokens[j:j+i])\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) 
\n ngrams.append(ng_str)\n \n ng_str = \" \".join(tokens)\n ng_str = (ng_str.rstrip(string.punctuation)).lstrip(left_punctuation) \n ngrams.append(ng_str)\n return ngrams", "def getgrams(text, tokendict):\n n = len(text)\n grams = []\n # Get lower-case of words\n if n >= 1:\n grams.append(tokendict[text[0]].lemma.lower())\n grams.append(tokendict[text[-1]].lemma.lower())\n grams.append(tokendict[text[0]].pos)\n grams.append(tokendict[text[-1]].pos)\n if n >= 2:\n token = tokendict[text[0]].lemma.lower() \\\n + ' ' + tokendict[text[1]].lemma.lower()\n grams.append(token)\n token = tokendict[text[-2]].lemma.lower() \\\n + ' ' + tokendict[text[-1]].lemma.lower()\n grams.append(token)\n return grams", "def pgram(w, freqs, N):\n mle = 0\n fifo = [':'] * N\n for i in range(N-1):\n \tw += ':'\n for c in w:\n fifo.pop(0)\n fifo.append(c)\n n = N\n ngram = ''.join(fifo[:n])\n p = log(MLE(ngram, freqs))\n mle += p\n return mle", "def count_ngrams(self):\n self.unigram = self.count_unigram(self.word_list)\n self.bigram = self.count_bigram(self.word_list)\n # self.trigram = self.count_trigram(self.word_list)\n # self.four_gram = self.count_four_gram(self.word_list)\n # self.five_gram = self.count_five_gram(self.word_list)", "def precook(s, n=4, out=False):\n words = s.split()\n counts = defaultdict(int)\n for k in xrange(1,n+1):\n for i in xrange(len(words)-k+1):\n ngram = tuple(words[i:i+k])\n counts[ngram] += 1\n return counts", "def generate_ngram(corpus,n=2):\r\n def generate_ngram_str(text,n):\r\n text = tokenizer.tokenize(text)\r\n for i in range(0, len(text)-n+1):\r\n yield text[i:i+n]\r\n if isinstance(corpus,str):\r\n for ngram in generate_ngram_str(corpus,n):\r\n yield ngram\r\n elif isinstance(corpus, (list, types.GeneratorType)):\r\n for text in corpus:\r\n for ngram in generate_ngram_str(text,n):\r\n yield ngram", "def get_ngrams(stats,s,t,i):\n #lemma ngrams\n ngram_sizes = [\"bi\", \"tri\"]\n for ngram_size in ngram_sizes:\n lm_ngram = get_lemma_ngrams(s, t, i, ngram_size)\n if lm_ngram:\n put_feature_value_list(stats,\"lemma_\" + ngram_size + \"gr\", lm_ngram)\n\n #POS and deprel bigrams\n if i < s.length-1:\n put_feature_value_list(stats,\"deprels_bigr\", (t.deprel,s.nodes[i+1].deprel))\n put_feature_value_list(stats,\"pos_bigr\", (t.pos,s.nodes[i+1].pos))\n \n #POS and deprel trigrams\n if i < s.length-2:\n put_feature_value_list(stats,\"deprels_trigr\", (t.deprel, s.nodes[i+1].deprel, s.nodes[i+2].deprel))\n put_feature_value_list(stats,\"pos_trigr\", (t.pos, s.nodes[i+1].pos, s.nodes[i+2].pos))\n\n return stats", "def ngram_encoding(self, data):\n _buffer = list()\n for word in data:\n if self.composition == \"bi-lstm\" or self.composition == \"addition\":\n ngrams = self.word_to_ngrams(word)\n _buffer.append(self.padding(ngrams, self.max_ngram_per_word,\n self.ngram_to_id[\"<PAD>\"]))\n else:\n sys.exit(\"Unknown composition\")\n return _buffer", "def gen_ngrams(items, n):\n ngs = {}\n ilen = len(items)\n for i in xrange(ilen-n+1):\n ng = tuple(items[i:i+n])\n ngs[ng] = ngs.get(ng, 0) + 1\n return ngs", "def ngrams(self, words):\n if words[0] in self._locs:\n for loc in self._locs[words[0]]:\n ngram = NGram.from_loc(len(words), loc)\n if ngram.words == words:\n yield ngram", "def generate_ngrams(iterable, n):\n return zip(*[itertools.islice(it, i, None) for i, it in enumerate(itertools.tee(iterable, n))])", "def get_n_minus_1_grams(n_grams: str) -> str:\n return n_grams.rsplit(' ')[0]", "def extract_char_ngrams(self, s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i 
in range(len(s) - n + 1)])", "def extract_char_ngrams(self, s: str, n: int) -> Counter:\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])", "def extract_ngrams(self, sequence):\n sequence = self.prefix + sequence + self.suffix\n for i, event in enumerate(sequence[self.n:], self.n):\n yield event, sequence[i-self.n: i]", "def construct_seq_unique_ngrams(xs, n):\n seq_unique = list(map(lambda x: x[0], itertools.groupby(xs)))\n unique_ngrams = construct_ngrams(seq_unique, n)\n ngrams = []\n prev = None\n i = 0\n for x in xs:\n if prev and x != prev:\n # if not a sequentially repeated element\n # fetch a new ngram\n i += 1\n ngrams.append(unique_ngrams[i])\n prev = x\n return ngrams", "def _precook(s, n=4, out=False):\n if isinstance(s,str):\n words = s.split()\n # print(words)\n elif isinstance(s,list) or isinstance(s,tuple):\n words = s\n else:\n raise NotImplementedError(type(s))\n counts = defaultdict(int)\n for k in range(1, n + 1):\n for i in range(len(words) - k + 1):\n ngram = tuple(words[i:i + k])\n counts[ngram] += 1\n return (len(words), counts)", "def n_gram(data, headers, cat_labels, num_labels):\n\t\tn_grammed = []\n\n\t\tfor i, tok in enumerate(data):\n\t\t\tif i == 0:\n\t\t\t\tn_grammed.append(data[-1]+tok+data[i+1])\n\t\t\telif i == len(data) - 1:\n\t\t\t\tn_grammed.append(data[i-1]+tok+data[0])\n\t\t\telse:\n\t\t\t\tn_grammed.append(data[i-1]+tok+data[i+1])\n\n\t\tn_grammed_headers = [header + \"_min1\" for header in headers] + headers + [header + \"_pls1\" for header in headers]\n\t\tn_grammed_cat_labels = [lab + \"_min1\" for lab in cat_labels] + cat_labels + [lab + \"_pls1\" for lab in cat_labels]\n\t\tn_grammed_num_labels = [lab + \"_min1\" for lab in num_labels] + num_labels + [lab + \"_pls1\" for lab in num_labels]\n\n\t\treturn n_grammed, n_grammed_headers, n_grammed_cat_labels, n_grammed_num_labels", "def reconstruct_ngram(self, ngram):\n\n punc_b = ['!', '?', '.', ',', ';', ':', '\\'', ')', ']', '}']\n punc_a = ['(', '[', '}', '$']\n ngram = ' '.join(ngram)\n for p in punc_b:\n ngram = ngram.replace(' '+p, p)\n for p in punc_a:\n ngram = ngram.replace(p+' ', p)\n ngram = re.sub('(^| )BEGQ', ' \"', ngram)\n ngram = re.sub('ENDQ($| )', '\" ', ngram)\n ngram = ngram.replace('DOUBLEDASH', '--')\n return ngram", "def count_n_grams(data, n, start_token='<s>', end_token = '<e>'):\r\n \r\n # Initialize dictionary of n-grams and their counts\r\n n_grams = {}\r\n\r\n \r\n for sentence in data: # complete this line\r\n \r\n # prepend start token n times, and append <e> one time\r\n sentence = [start_token]*n + sentence + [end_token]\r\n \r\n # convert list to tuple\r\n # So that the sequence of words can be used as\r\n # a key in the dictionary\r\n sentence = tuple(sentence)\r\n\r\n \r\n for i in range(len(sentence)+1-n): # complete this line\r\n\r\n # Get the n-gram from i to i+n\r\n n_gram = sentence[i:i+n]\r\n\r\n # check if the n-gram is in the dictionary\r\n if n_gram in n_grams: \r\n \r\n # Increment the count for this n-gram\r\n n_grams[n_gram] += 1\r\n else:\r\n # Initialize this n-gram count to 1\r\n n_grams[n_gram] = 1\r\n \r\n return n_grams", "def build_ngram_vocab(self, n):\n max_ngram_per_word = 0\n ngram_dict = collections.defaultdict(int)\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n _word = '^' + word + '$'\n ngram_counts = len(_word) - n + 1\n if ngram_counts > max_ngram_per_word:\n max_ngram_per_word = ngram_counts\n for i in range(ngram_counts):\n ngram = _word[i:i + n]\n ngram_dict[ngram] += 1\n\n 
unk_ngram_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(ngram_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_ngram_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_ngram_list, max_ngram_per_word", "def ngrams(n, target):\n chars = collections.deque()\n while True:\n chars.append((yield))\n if len(chars) == n: \n target.send(chars)\n chars.popleft()", "def __generateSentences(self, ngrams, n, length, repetition, seed):\n randInt = random.randint(1, repetition)\n sent = ''\n for i in range(randInt):\n sent += self.__markovGen(self.ngrams, n, length, seed)\n sent += ' '\n return sent", "def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return", "def extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:\n\n ngrams = Counter()\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i: i + n])\n ngrams[ngram] += 1\n\n return ngrams", "def swap_n_grams(self, all_n_grams, n): #TODO do this in preprocessing with tokens... or its fine?\n all_new_tokens = self.lemma_tokens\n index = 0\n for zettel in all_n_grams:\n if len(zettel) != 0:\n for new_gram in zettel:\n for token_zettel in all_new_tokens:\n token_index = 0\n for word in token_zettel:\n if n == 2:\n if token_index != len(token_zettel)-1:\n if word[0] + \" \" + token_zettel[token_index+1][0] == new_gram:\n word[0] = new_gram\n word[1] = 'NG'\n del token_zettel[token_index+1]\n if n == 3:\n if token_index != len(token_zettel)-1:\n if token_index != len(token_zettel)-2:\n if word[0] + \" \" + token_zettel[token_index+1][0] + \" \" + token_zettel[token_index+2][0] == new_gram:\n word[0] = new_gram\n word[1] = 'NG'\n del token_zettel[token_index+1]\n del token_zettel[token_index+2]\n token_index += 1\n index += 1\n self.lemma_tokens = all_new_tokens", "def __iter__(self):\n return self.ngrams()", "def extract_ngrams(line, min_order=1, max_order=BLEU.NGRAM_ORDER) -> Counter:\n\n ngrams = Counter() # type: Counter\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i: i + n])\n ngrams[ngram] += 1\n\n return ngrams", "def create_ngram_set(input_list, ngram_value=2):\n return set(zip(*[input_list[i:] for i in range(ngram_value)]))", "def get_lemma_ngrams(s, t, i, ngram_size):\n ngrams = {\"uni\":1, \"bi\":2, \"tri\":3}\n if i < s.length-(ngrams[ngram_size]-1):\n lemma_ngrams = []\n for j in range(ngrams[ngram_size]):\n if s.nodes[i+j].lemma:\n lemma_ngrams.append(s.nodes[i+j].lemma[0]) #only keeps the first lemma\n else:\n lemma_ngrams.append(s.nodes[i+j].word + \"*\") #no lemma, word form* instead\n if len(lemma_ngrams) > 1:\n return tuple(lemma_ngrams)\n return lemma_ngrams[0] #or better to keep them all?\n else:\n return []", "def get_preds_ngram(preds, len_preds, n):\n from utils.dataProcess import get_N_gram\n\n def iter_preds(preds, len_preds):\n for len, utt in zip(len_preds, preds):\n for token in utt[:len]:\n yield token.numpy()\n ngrams = get_N_gram(iter_preds(preds, len_preds), n)\n\n return ngrams", "def bigram_representation(data):\r\n vec = CountVectorizer(ngram_range=(1,2))\r\n vec = vec.fit(data)\r\n return vec", "def create_ngram_set(input_list, ngram_value=2):\n return set(zip(*[input_list[i:] for i in 
range(ngram_value)]))", "def calculate_ngram_frequencies(text, n):\n\n import re\n\n # Create a new dictionary\n ngram_dict = {}\n\n # Find all sentences\n sentences_list = re.findall(r'[^\\.\\?!\"]+', text)\n\n # Iterate over sentences in the list\n for sentence in sentences_list:\n # Split words by a whitespace character\n words_list = sentence.rsplit()\n\n # Iterate over ngrams in the sentence\n for i in range(len(words_list) - n + 1):\n\n # Join the words to size of n\n ngram = ' '.join(words_list[i:i + n])\n\n # Record the presence of a new ngram\n if not ngram in ngram_dict:\n ngram_dict[ngram] = 1\n\n # Add the number of occurrence of the ngram\n elif ngram in ngram_dict:\n ngram_dict[ngram] += 1\n\n return ngram_dict", "def range_ngrams(tokens, ngramRange=(1,2)):\n return chain(*(n_grams(tokens, i) for i in range(*ngramRange)))", "def add_ngram(sequences, token_indice, ngram_range=2):\n new_sequences = []\n for input_list in sequences:\n new_list = input_list[:]\n for ngram_value in range(2, ngram_range + 1):\n for i in range(len(new_list) - ngram_value + 1):\n ngram = tuple(new_list[i:i + ngram_value])\n if ngram in token_indice:\n new_list.append(token_indice[ngram])\n new_sequences.append(new_list)\n\n return new_sequences", "def transform(self, X):\n out= [self._word_ngrams(text,ngram=self.word_ngrams)\n for text in X]\n return out", "def __iter__(self):\n return self.root.ngrams()" ]
[ "0.78278804", "0.76701784", "0.76432234", "0.7412483", "0.7388992", "0.7383285", "0.7343278", "0.7261323", "0.725827", "0.7243522", "0.72036326", "0.7154389", "0.7147983", "0.71211857", "0.71205693", "0.70832944", "0.70656013", "0.70451486", "0.70400804", "0.6991556", "0.6975258", "0.6938556", "0.6926962", "0.6925717", "0.6918904", "0.6916664", "0.687971", "0.687417", "0.6863475", "0.6862314", "0.68422407", "0.6792676", "0.6778424", "0.6752067", "0.672995", "0.67270416", "0.67038596", "0.66994864", "0.6685978", "0.6672666", "0.66536814", "0.66518605", "0.66249526", "0.6584799", "0.6574138", "0.6561952", "0.65586895", "0.6556419", "0.65095705", "0.6509321", "0.6506329", "0.65043867", "0.6484509", "0.6481817", "0.6474019", "0.6448554", "0.64352244", "0.63822776", "0.633758", "0.6333866", "0.63268536", "0.63249546", "0.63152367", "0.6309791", "0.63029134", "0.6289609", "0.62601775", "0.62599623", "0.6232715", "0.6229458", "0.6224892", "0.6223391", "0.6223286", "0.61973375", "0.6186034", "0.6186034", "0.61831254", "0.6178851", "0.6132807", "0.61126375", "0.6088702", "0.6084162", "0.60835564", "0.6070679", "0.60671926", "0.6059905", "0.6056952", "0.6053116", "0.6022536", "0.60144776", "0.60100275", "0.60077536", "0.5990931", "0.59733945", "0.5966517", "0.5963193", "0.59483427", "0.592427", "0.591624", "0.5911827" ]
0.64976317
52
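A minimal, self-contained sketch of how the `get` method in the record above could be exercised, assuming `self.words` is a list of tokens and that the `ngrams` and `get_occurences` helpers behave as their names suggest; the helper implementations and the `TextStats` host class below are illustrative assumptions, not part of the dataset.

from collections import Counter


def ngrams(words, n):
    # Sliding window of n consecutive tokens.
    return [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]


def get_occurences(parts):
    # Count each n-gram string, most frequent first.
    return Counter(parts).most_common()


class TextStats:
    # Hypothetical host class; the dataset record only shows `get`.
    def __init__(self, words):
        self.words = words

    def get(self, n):
        parts = [' '.join(g) for g in ngrams(self.words, n)]
        return get_occurences(parts)


if __name__ == "__main__":
    stats = TextStats("the cat sat on the cat mat".split())
    print(stats.get(2))  # [('the cat', 2), ('cat sat', 1), ...]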
return most used noun_phrases
def get_phrases(self, first=10):
    return get_occurences(self.lemmatized_phrases)[:first]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def most_words(self, n):\n return big_tags", "def frequent_nouns_counts(self, tokens):\n\n unigram_fd = nltk.FreqDist(tokens)\n pos = POS()\n common_unigrams = unigram_fd.most_common(\n int(self.top_pct * len(unigram_fd)))\n\n nouns = [pair for pair in common_unigrams\n if pair[0] not in self.stopwords()\n and pos.percent_noun(pair[0]) > 0.5]\n\n return nouns", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\r\n for n in sorted(ngrams):\r\n print('----- {} most common {}-grams -----'.format(num, n))\r\n for gram, count in ngrams[n].most_common(num):\r\n print('{0}: {1}'.format(' '.join(gram), count))\r\n print('')", "def pronoun_instance_dist(novel, words):\n text = novel.get_tokenized_text()\n output = []\n count = 0\n start = False\n\n for e in text:\n e = e.lower()\n if not start:\n if e in words:\n start = True\n else:\n count += 1\n if e in words:\n output.append(count)\n count = 0\n return output", "def get_top_tweet_ngrams(corpus, dim=2, n=None):\r\n vec = CountVectorizer(ngram_range=(dim, dim)).fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0) \r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)\r\n return words_freq[:n]", "def top_question_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['question']:\n w = Dictionary.normalize(w)\n if args.uncased_question:\n w = w.lower()\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def test_top_n_counts():\n ngrams = NgramFrequencies()\n new_dic = {\n \"a\": 1,\n \"b\": 2,\n \"c\": 3,\n \"d\": 4\n }\n top_list = ngrams.top_n_counts(new_dic)\n assert top_list == [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]", "def get_most_probable_sentence(\n self,\n 
suggestions: List[List[str]]\n ) -> str:\n sent_word_count = len(suggestions)\n suggestions = [[tok] for tok in ContextModel.START_TOKENS] + suggestions + \\\n [[tok] for tok in ContextModel.END_TOKENS]\n memory = [[MemoryItem(score=0.0, decoded=tuple())], [MemoryItem(score=0.0, decoded=tuple())]]\n for t in range(2, len(suggestions)):\n memory.append([])\n for i, word in enumerate(suggestions[t]):\n mx_score, pick_1, pick_2 = 0, 0, 0\n for j, suggestion_1 in enumerate(suggestions[t - 1]):\n for k, suggestion_2 in enumerate(suggestions[t - 2]):\n curr_score = memory[-3][k].score \\\n + self.model_dict.get((suggestion_2, suggestion_1), self.default_prob) \\\n + self.model_dict.get((suggestion_1, word), self.default_prob) \\\n + self.model_dict.get((suggestion_2, word), self.default_prob)\n if curr_score > mx_score:\n mx_score, pick_1, pick_2 = curr_score, j, k\n memory_item = MemoryItem(score=mx_score, decoded=memory[-3][pick_2].decoded + (pick_2, pick_1,))\n memory[-1].append(memory_item)\n memory = memory[1:]\n\n decoded = ' '.join([suggestions[t][i] for t, i in enumerate(memory[-1][0].decoded[-sent_word_count:],\n start=2)])\n # score = memory[-1][0].score\n return decoded", "def most_words_and_longest(self, n):\n return big_tags", "def get_noun_phrases(blob):\n return blob.noun_phrases", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def top_chars(phrase):\n phrase = phrase.split()\n letter_counts = {}\n\n # loops through phrase and adds word name to key with the length of the word. If no such key exists, it is created\n for word in phrase:\n for letter in word:\n if letter in letter_counts:\n letter_counts[letter] = letter_counts[letter] + 1\n else:\n letter_counts[letter] = 1\n\n most_used = []\n # loops through each key in the dictionary of usage counts and checks if it has the highest usage count.\n # if it does, it replaces the old elements in the list. 
If it is used as much as the currently most-used letter,\n # it is appended to the list.\n for key in letter_counts:\n if most_used == []:\n most_used.append(key)\n elif letter_counts[key] > letter_counts[most_used[0]]:\n most_used = [key]\n elif letter_counts[key] == letter_counts[most_used[0]]:\n most_used.append(key)\n\n return sorted(most_used)", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. 
using dymanic programming to reduce computing time\n \n return seen", "def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]", "def negatize_nouns(self, sent_target, max_synset_len=3):\n candidates = {}\n flatten = lambda l: [item for sublist in l for item in sublist]\n for w in sent_target[\"nouns\"]:\n candidates[w] = [(w, self.evaluator.value_evaluation(w))]\n synsets = Word(w).get_synsets(pos=NOUN)[:max_synset_len]\n upper_meanings = []\n for ss in synsets:\n hype = flatten([h.lemmas() for h in ss.hypernyms()])\n hypo = flatten([h.lemmas() for h in ss.hyponyms()])\n upper_meanings += hype\n upper_meanings += hypo\n upper_meanings += flatten([u.antonyms() for u in upper_meanings])\n upper_meanings = list(set(upper_meanings))\n for l in upper_meanings:\n val = self.evaluator.value_evaluation(l.name().lower())\n candidates[w].append((l.name().lower(), val))\n return candidates", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def pos_text(text):\n nlp = spacy.load('en')\n doc = nlp(text)\n # all tokens that arent stop words or punctuations\n words = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and token.is_punct != True]\n\n # noun tokens that arent stop words or punctuations\n final_tokens = [token.text.encode('ascii', 'ignore') for token in doc if token.is_stop != True and \\\n token.is_punct != True and (token.pos_ == \"NOUN\" or token.pos_ == \"VERB\")]\n\n # frequency dictionary for all tokens\n word_freq = Counter(words)\n\n #top 100 words to display in wordcloud which are noun or verb\n #frequency will be used to show big/small words in wordcloud\n final_tokens_freq = Counter(final_tokens)\n result = final_tokens_freq.most_common(config.config[\"MAX_FREQUENCY\"])\n #print result\n return result", "def mostFrequentNGram(text,n):\n\n ngram = calculateNGrams(text,n)\n return counterFrequency(ngram)", "def find_top_words_for_states(n, O, symbols):\n Osize = O.shape\n\n top_words = {}\n top_words_prob = {}\n for state in range(Osize[0]):\n top_index = sorted(range(len(O[state])), key=lambda i: O[state, i], reverse=True)\n top_words[state] = [(symbols[ind], O[state, ind]) for ind in top_index[0:n]]\n\n curr_prob = O[state, top_index[0]]\n top_words_prob_temp = []\n ind = 0\n while curr_prob < 0.5:\n top_ind = top_index[ind]\n top_words_prob_temp.append((symbols[top_ind], O[state, top_ind]))\n ind += 1\n curr_prob += O[state, top_index[ind]]\n\n top_words_prob[state] = top_words_prob_temp\n\n return top_words, top_words_prob", "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def predict_currword_given_lastword(first_word, second_word, top_n=10):\r\n return Counter(\r\n {\r\n w: c\r\n for w, c in model.WORD_TUPLES_MODEL[first_word.lower()].items()\r\n if w.startswith(second_word.lower())\r\n }\r\n ).most_common(top_n)", "def top_ngrams(ngram_dict,ntop):\n # get list of all ngrams and counts\n ngrams = ngram_dict.items()\n # sort according to the count\n # we use a python lambda function here to say that we want to sort by the\n # second item in the tuples contained in the list\n ngrams.sort(key=lambda x: x[1], reverse=True)\n # return the top ngrams\n return ngrams[0:ntop]", "def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 
1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]", "def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def ngram_frequency(word):\r\n\tword = word.lower()\r\n\tword = re.sub(r'[^A-Za-z. ]','',word)\r\n\tngram_statistics = {}\r\n\tngram_categorization_model_keys = []\r\n\tngram_categorization_model_occurances = []\r\n\tres = [0 for _ in range(0,300)]\r\n\tfor ituple in ngram_categorization_model:\r\n\t\tngram_categorization_model_keys.append(ituple[0])\r\n\t\tngram_categorization_model_occurances.append(int(ituple[1]))\r\n\tfor grams in range(2,6):\r\n\t\tfor i in range(len(word)-grams+1):\r\n\t\t\tseq = word[i:i+grams]\r\n\t\t\tif seq not in ngram_statistics.keys():\r\n\t\t\t\tngram_statistics.update({seq:1})\r\n\t\t\telse:\r\n\t\t\t\tngram_occurances = ngram_statistics[seq]\r\n\t\t\t\tngram_statistics.update({seq:ngram_occurances+1})\r\n\tngram_frequency_keys = ngram_statistics.keys()\r\n\tngram_frequency_occurances = list(ngram_statistics.values())\r\n\tfor index, val in enumerate(ngram_categorization_model_keys):\r\n\t\tfor index1, val1 in enumerate(ngram_frequency_keys):\r\n\t\t\tif val == val1:\r\n\t\t\t\tres[index] = ngram_categorization_model_occurances[index]*ngram_frequency_occurances[index1]\r\n\treturn res", "def get_most_informative_word(self, documents, vocabulary):\n most_informative_word = None\n most_informative_word_gain = 0\n for word in vocabulary:\n gain = self.get_information_gain(word, documents)\n if most_informative_word == None or gain >= most_informative_word_gain:\n most_informative_word = word\n most_informative_word_gain = gain\n return most_informative_word", "def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. 
The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)", "def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)", "def get_nouns(txt):\n query = 'https://api.textgain.com/1/tag?q='\n query += urllib.parse.quote(txt, safe='')\n query += '&lang=fr&key=***'\n resp = requests.get(query)\n\n body = json.loads(resp.text)['text'][0]\n\n nouns = {}\n for iterable_elem in body:\n for elem in iterable_elem:\n if elem['tag'] == 'NOUN':\n word = elem['word']\n if word in nouns.keys():\n nouns[word] += 1\n else:\n nouns[word] = 1\n print(nouns)\n return nouns", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]", "def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def get_nnp_ngrams(original_text, highlight=4, minsize=0):\n minsize = minsize-1\n if minsize<0:\n minsize = 0 \n \n tokens = nltk.wordpunct_tokenize(original_text)\n tagged = nltk.word_tokenize(original_text)\n tagged = nltk.pos_tag(tokens)\n #for word in tagged:\n # print word\n doc_length = len(tokens)\n counter = 0\n counter2 = 0\n if highlight==0:\n concated_test = doc_length # This is set to doc_length but could be anything recommend 3.\n else:\n concated_test = highlight\n list_of_NNPs = []\n while counter < (doc_length-1):\n while counter2 < concated_test:\n counter2 = counter2+1\n counter3 = 0\n #print '--------------------'\n temp_array = []\n all_nnp = True\n while counter3 < counter2:\n if counter < (doc_length-counter3):\n #print tokens[counter+counter3],tagged[counter+counter3][1]\n temp_array.append(tokens[counter+counter3])\n if tagged[counter+counter3][1] != 'NNP':\n all_nnp = False\n 
counter3 = counter3+1\n counter3 = 0\n if all_nnp == True:\n if(len(temp_array)>minsize):\n list_of_NNPs.append(temp_array)\n #print 'added to main array'\n #else:\n #print 'not all NNPs'\n counter2 = 0\n counter = counter+1\n #for ngram in list_of_NNPs:\n # print ngram\n import itertools\n list_of_NNPs.sort()\n unique_NNPs = list(list_of_NNPs for list_of_NNPs,_ in itertools.groupby(list_of_NNPs))\n return unique_NNPs", "def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def ngrammer(text, gramsize = 3, threshold = 4):\n # we need to import this in order to find the duplicates:\n import nltk\n from nltk.util import ngrams\n from collections import defaultdict\n # get ngrams of gramsize \n if type(text) != list:\n text = tokenised = nltk.word_tokenize(text)\n text = [token for token in text if token.isalnum()]\n # get ngrams of gramsize \n raw_grams = ngrams(text, gramsize)\n \n # a subdefinition to get duplicate lists in a list\n def list_duplicates(seq):\n tally = defaultdict(list)\n for i,item in enumerate(seq):\n tally[item].append(i)\n # return to us the index and the ngram itself:\n return ((len(locs),key) for key,locs in tally.items() \n if len(locs) > threshold)\n\n # use our duplication detector to find duplicates\n dupes = list_duplicates(raw_grams)\n # return them, sorted by most frequent\n return sorted(dupes, reverse = True)", "def get_top_keywords(entries):\n # Extract text for processing\n\n raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]", "def countNouns( aList ):\n\ttotalNounCount = 0\n\tnounCount = 
0\n\tfindNoun = re.compile('NN')\n\tfor x in aList:\n\t\tfor y in x:\n\t\t\tif findNoun.search(str(y)) is not None:\n\t\t\t\tnounCount += 1\n\t\tprint( nounCount )\n\t\tprint( \"\\n\" )\n\t\ttotalNounCount += nounCount\n\t\tnounCount = 0\n\treturn totalNounCount", "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]", "def _get_highest_nperms(n, words, dawg):\n best = (None, 0, []) # word, num perms, nperms\n\n words_counted = 0\n total_perms_counted = 0\n\n for word in words:\n words_counted += 1\n # Generate all n-length permutations of word\n nperms = [w for w in dawg.gen_completions('', word)]\n total_perms_counted += len(nperms)\n\n nperms = [w for w in nperms if len(w) == n]\n\n for nperm in nperms:\n try:\n words.remove(nperm)\n except ValueError:\n pass\n\n nperms = list(set(nperms))\n\n if len(nperms) > best[1]:\n best = (word, len(nperms), nperms)\n\n print(f'words permuted: {words_counted}')\n print(f'total permutations: {total_perms_counted}')\n return best", "def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def amount_nouns_and_numerals_stanford_nlp(self) -> int:\n stanza.download(self.lang, processors = 'tokenize,mwt,pos')\n nlp = stanza.Pipeline(self.lang, processors = 'tokenize,mwt,pos')\n doc = nlp(self.sent)\n for sentence in doc.sentences:\n for word in sentence.words:\n #if the part of speech is a noun, a proper noun or a numeral \n #(only for en) \n if self.lang == 'en':\n if word.upos == 'NOUN' or word.upos == 'PROPN' or word.upos == 'NUM':\n 
self.amount_nouns_and_num += 1\n elif self.lang == 'de' or self.lang == 'fr':\n if word.upos == 'NOUN' or word.upos == 'PROPN':\n self.amount_nouns_and_num += 1\n return self.amount_nouns_and_num", "def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)", "def top_terms(self, nterms):\n return self.sql_session.query(Term)\\\n .filter(Term.term != '*')\\\n .order_by(desc(Term.relevance))[:nterms]", "def topn_similarity(word_vecs, word, n):\n vec = word_vecs[word]\n sim = dict()\n for w in word_vecs:\n if w != '<TOP>' and w != '<BOT>':\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))\n sim[w] = 1 - spatial.distance.cosine(vec, word_vecs[w])\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))/(mod(vec)*mod(np.transpose(word_vecs[w])))\n dd = OrderedDict(sorted(sim.items(), key=lambda x: x[1], reverse=True))\n return list(dd.items())[1:n+1]", "def task2(dictionary):\n word_count = Counter(dictionary)\n ans = word_count.most_common(10)\n print(ans)\n return ans", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)", "def get_terms(corpus, n=4000):\n\n stop = StopLexicon()\n scrabble = ScrabbleLexicon()\n\n ngrams = defaultdict(set)\n citations = defaultdict(set)\n\n for doc in corpus:\n for ng in get_ngrams([doc.title]):\n if good_ngram(ng, scrabble, stop):\n ngrams[ng].add(doc.id)\n for ref in doc.references:\n citations[doc.id].add(ref)\n citations[ref].add(doc.id)\n\n ngrams = filter_plurals(ngrams)\n\n ngram_counts = {x: len(ngrams[x]) for x in ngrams}\n filtered = filter_subsumed(ngram_counts)\n\n if citations:\n ngrams = score_ngrams(ngrams, citations)\n ngrams = filter_subsumed(ngrams)\n return [' '.join(x) for x in sorted(ngrams, key=lambda x: ngrams[x],\n reverse=True) if x in filtered][:n]\n else:\n return [' '.join(x) for x in sorted(filtered, key=lambda x: filtered[x],\n reverse=True)][:n]", "def find_most_anagrams_from_wordlist(wordlist):\n # max_count = 1\n # count = 1\n # letters_dict = {}\n # most_anagrams_word = \"\"\n # letters_dict_list = []\n # for word in wordlist:\n # for letter in word:\n # letters_dict[letter] = letters_dict.get('letter', 0) + 1\n # letters_dict_list.append(sorted(list(letters_dict.items())))\n # letters_dict = {}\n\n # cutting_wordlist = wordlist[:]\n # to_cut_idx = []\n # seen = set()\n # while wordlist:\n # word = wordlist.pop(0)\n # cutting_wordlist.pop(0)\n # letters_dict = letters_dict_list.pop(0)\n # seen.add(word)\n # for idx, next_word in enumerate(wordlist):\n # if next_word in seen:\n # to_cut_idx.append(idx)\n # cutting_wordlist.remove(next_word)\n # elif len(next_word) == len(word):\n # if letters_dict_list[idx] == letters_dict:\n # 
seen.add(next_word)\n # to_cut_idx.append(idx)\n # count += 1\n # cutting_wordlist.remove(next_word)\n\n # if count > max_count:\n # most_anagrams_word = word\n # max_count = count\n # wordlist = cutting_wordlist[:]\n # if to_cut_idx:\n # for idx in reversed(to_cut_idx):\n # letters_dict_list.pop(idx)\n # # if word == 'angor':\n # # print('COUNT: ', count)\n # count = 1\n # to_cut_idx = []\n # print('MAX: ', max_count)\n # return most_anagrams_word\n \n combo_dict = {}\n for word in wordlist:\n sorted_chars = ''.join(sorted(word))\n if combo_dict.get(sorted_chars):\n combo_dict[sorted_chars].append(word)\n else:\n combo_dict[sorted_chars] = [word]\n\n max_count = 0\n word_thats_max = ''\n for key in combo_dict:\n if len(combo_dict[key]) > max_count:\n max_count = len(combo_dict[key])\n word_thats_max = combo_dict[key][0]\n\n return word_thats_max", "def test_top_n_freqs():\n ngrams = NgramFrequencies()\n top_list = [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]\n top_freq = ngrams.top_n_freq(top_list, 10)\n assert top_freq == [(\"d\", 0.4), (\"c\", 0.3), (\"b\", 0.2), (\"a\", 0.1)]", "def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = 
list(set(phraseids))\n results=categorize(phraseids)\n return results", "def predict_currword(word, top_n=10):\r\n try:\r\n return [\r\n (k, v) for k, v in model.WORDS_MODEL.most_common() if k.startswith(word)\r\n ][:top_n]\r\n except KeyError:\r\n raise Exception(\r\n \"Please load predictive models. Run:\\\r\n \\n\\tautocomplete.load()\"\r\n )", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1", "def most_frequent(corpus):\n fd = nltk.FreqDist(corpus)\n return fd.most_common(10)", "def build_thesaurus(home_dir, dir, percent):\n word_count = {}\n top_50 = {}\n word_count = word_count_dict(home_dir, dir, percent)\n file = open(home_dir + dir + 'Thesaurus.txt','w')\n file2 = open(home_dir + dir + 'Top50.txt','w')\n #Sort words based on the frequency of the word\n count = 0\n for word in sorted(word_count, key = word_count.get, reverse = True):\n file.write(word + ' ' + str(word_count[word]) + '\\n')\n if count < len(word_count) / 2:\n file2.write(word + ' ' + str(word_count[word]) + '\\n')\n top_50[word] = word_count[word]\n else:\n break\n count = count + 1\n file.close()\n file2.close()\n return word_count, top_50", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def get_ngramlogprobs_fromcorpus(tokenizedseqs, n):\n return", "def 
_disambiguate_gloss_by_most_frequent_sense(self, gloss, taggable_tokens, tagged_tokens):\n\t\tdisambiguated_gloss = gloss\n\n\t\tfor undisambiguated_token in taggable_tokens:\n\t\t\tpossible_senses = self._get_possible_wn_senses_for_token(undisambiguated_token)\n\n\t\t\tif len(possible_senses) != 0:\n\t\t\t\tmost_frequent_sense = max(possible_senses, key=lambda sense_key: self.reference_wordnet.sense_keys[sense_key][\"tag_cnt\"])\n\t\t\t\tsynset_offset = self.reference_wordnet.sense_keys[most_frequent_sense][\"synset_offset\"]\n\t\t\telse:\n\t\t\t\tmost_frequent_sense = \"no_wn_sense_existing\"\n\t\t\t\tsynset_offset = \"no_wn_sense_existing\"\n\t\t\t\tself._log_message(\"WARNING: no wn sense found for token {0}\".format(undisambiguated_token))\n\n\t\t\ttoken_index = undisambiguated_token.id\n\n\t\t\tif undisambiguated_token.wn_sense_key is None and undisambiguated_token.wn_synset_offset is None:\n\t\t\t\tdisambiguated_gloss.tokens[token_index].tag = \"mfs\"\n\t\t\t\tdisambiguated_gloss.tokens[token_index].wn_sense_key = most_frequent_sense\n\t\t\t\tdisambiguated_gloss.tokens[token_index].wn_synset_offset = synset_offset\n\t\t\telse:\n\t\t\t\tprint(\"WHAT\")\n\n\t\treturn disambiguated_gloss", "def get_nps(self):\n\n # determine all leaf ids in the parse tree which refer to a noun\n nouns = []\n for node_id in self.parsetree.nodes():\n node = self.parsetree.node[node_id]\n if not node['is_leaf']:\n continue\n leaf_idx = node['left_leaf_idx']\n if leaf_idx >= len(self.tokens):\n continue\n self.words[leaf_idx] == node['label']\n is_noun = self.tokens[leaf_idx].POS.cdata[0] == 'N'\n if is_noun:\n nouns.append(node_id)\n\n NPs = set()\n for noun in nouns:\n NPs.add(self.get_np_for_idx(noun))\n return NPs", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def most_influential_words(model, vectorizer, genre_index=0, num_words=10):\n features = vectorizer.get_feature_names()\n max_coef = sorted(enumerate(model.coef_[genre_index]), key=lambda x:x[1], reverse=True)\n return [[features[x[0]], x[1] ] for x in max_coef[:num_words]]", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def get_ngram(n, sentence):\n if n == 1:\n return sentence\n \n # create phrases model to find words and ngrams that occur at least once\n ngram = Phraser(Phrases(sentence, min_count=1, threshold=1))\n\n # for bigrams and higher grams\n for i in range(3,n):\n ngram = Phraser(Phrases(ngram[sentence], min_count=1, threshold=1))\n return ngram[sentence]", "def fetchphrases(query):\n results=searchphrases(query)\n parents=OrderedDict()\n children=OrderedDict()\n grand=OrderedDict()\n categories=[]\n unigrams={}\n bigrams={}\n trigrams={}\n dups=[]\n for cat in results:\n categories.append(cat[0])\n for cat in results:\n try:\n phrase=str(cat[0]).split()\n if(len(phrase)==1):\n categories.remove(cat[0])\n unigrams[phrase[0]]=cat[1]\n elif(len(phrase)==2):\n phrase=\" \".join(phrase)\n categories.remove(cat[0])\n bigrams[phrase]=cat[1]\n elif(len(phrase)==3):\n phrase=\" \".join(phrase)\n categories.remove(cat[0])\n trigrams[phrase]=cat[1]\n else:\n print \"Rest in categories\"\n except:\n print traceback.format_exc()\n if(len(unigrams)!=0):\n parents=unigrams\n if(len(bigrams)!=0):\n for unigram in unigrams.keys():\n for 
bigram,freq in bigrams.items():\n if(unigram in bigram):\n dups.append(bigram)\n try:\n children[unigram].append((bigram,freq))\n except:\n children[unigram]=[(bigram,freq)]\n\n else:\n parents[bigram]=freq\n if(len(trigrams)!=0):\n for bigram in bigrams.keys():\n for trigram,freq in trigrams.items():\n if(bigram in trigram):\n dups.append(trigram)\n try:\n grand[bigram].append((trigram,freq))\n except:\n grand[bigram]=[(trigram,freq)]\n else:\n try:\n children[bigram].append((trigram,freq))\n except:\n children[bigram]=[(trigram,freq)]\n elif(len(trigrams)!=0):\n for unigram in unigrams.keys():\n for trigram,freq in trigrams.items():\n if(unigram in trigram):\n dups.append(trigram)\n try:\n children[unigram].append((trigram,freq))\n except:\n children[unigram]=[(trigram,freq)]\n del trigrams[trigram]\n else:\n parents[trigram]=freq\n elif(len(bigrams)!=0):\n parents=bigrams\n if(len(trigrams)!=0):\n for bigram in bigrams.keys():\n for trigram,freq in trigrams.items():\n if(bigram in trigram):\n dups.append(trigram)\n try:\n children[bigram].append((trigram,freq))\n except:\n children[bigram]=[(trigram,freq)]\n del trigrams[trigram]\n else:\n parents[trigram]=freq\n elif(len(trigrams)!=0):\n parents=trigrams\n else:\n parents={}\n\n for d in dups:\n try:\n del parents[d]\n except:\n continue\n\n for key,values in children.items():\n sorted_child=sorted(values,key=lambda x:x[1],reverse=True)\n children[key]=sorted_child\n\n for key,values in grand.items():\n sorted_gchild=sorted(values,key=lambda x:x[1],reverse=True)\n grand[key]=sorted_gchild\n return parents,children,grand", "def correction(word):\r\n return max(candidates(word), key=P)", "def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r", "def getCounter(self):\n word_count, noun_word_count = Counter(), Counter()\n word_rating, noun_word_rating = defaultdict(list), defaultdict(list)\n docs = self.nlp.pipe(\n self.docs, n_process=1, disable=self.disablelayers)\n \n\n for index, doc in enumerate(docs):\n for token in doc:\n if not token.is_stop and not token.is_punct and token.pos_ in self.pos:\n if token.pos_ == 'PROPN':\n word_count[token.lemma_] += 1\n word_rating[token.lemma_].append(self.ratings[index])\n else:\n noun_word_count[token.lemma_] += 1\n noun_word_rating[token.lemma_].append(self.ratings[index])\n\n # if 0<=proper nouns<=5 found, add regular nouns\n if not word_count or len(word_count) <= 5:\n word_count += noun_word_count\n word_rating = {**word_rating, **noun_word_rating}\n \n word_color = {word: self.getColor(\n ratings)[1] for word, ratings in word_rating.items()}\n word_sentiment = {word: self.getColor(\n ratings)[0] for word, ratings in word_rating.items()}\n\n return word_count, word_color, word_sentiment", "def get_ambiguous_words(self, sort_on=None):\n multis = [word for word in self.word_tag_dict.keys() if len(self.word_tag_dict[word]) > 1]\n if not sort_on:\n multis.sort()\n return multis", "def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', 
'.join(top_n_words))\n return top_words", "def textrank(doc, kp_count):\n tokens = [normalize(tok) for tok in doc]\n candidates = [normalize(*token) for token in ngrams(doc, 1)]\n\n word_graph = networkx.Graph()\n word_graph.add_nodes_from(set(candidates))\n word_graph.add_edges_from(zip(candidates, candidates[1:]))\n\n kw_ranks = networkx.pagerank_scipy(word_graph)\n\n if 0 < kp_count < 1:\n kp_count = round(kp_count * len(kw_ranks))\n kp_count = int(kp_count)\n\n top_words = {word: rank for word, rank in kw_ranks.items()}\n\n keywords = set(top_words.keys())\n phrases = {}\n\n tok_iter = iter(tokens)\n for tok in tok_iter:\n if tok in keywords:\n kp_words = [tok]\n kp_words.extend(it.takewhile(lambda t: t in keywords, tok_iter))\n n = len(kp_words)\n avg_rank = sum(top_words[w] for w in kp_words) / n\n phrases[' '.join(kp_words)] = avg_rank\n\n top_phrases = top_keys(kp_count, phrases)\n\n return top_phrases", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`. It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # convert word to lowercase to avoid inconsistent hash values\n # due to different cases of the same word.\n w = w.lower()\n\n # check if the current word already exists as a key\n if w in keys:\n current_count = ht.get(w) # fetch the current count for that word\n current_count += 1 # increment count by one\n ht.put(w, current_count) # update value for the key\n else:\n # word does not exist in hash map\n keys.add(w) # add current word to keys set\n ht.put(w, 1) # insert key into hash map with value of 1\n\n # fetch unsorted list of tuples from parsed data\n word_count_list = compile_list(ht, keys)\n\n # sort word count tuple list\n word_count_list = word_count_sort(word_count_list)\n\n # initialize and fill final word list\n final_list = []\n\n for index in range(0, number):\n final_list.append(word_count_list[index])\n\n return final_list", "def most_similar(self, token, topn=10):\n vec = self.get_vector(token)\n assert vec is not None, \"Cannot compute similarity between None type vectors.\"\n return self.most_similar_embedding(vec, topn+1)[1:]", "def get_top_n_words(word_list, n):\n\tword_counts = dict()\n\tfor word in word_list:\n\t\tword_counts[word] = 1 + word_counts.get(word,0)\n\n\twords_list = word_counts\n\tsorted_list = sorted(words_list.items(), key = lambda x: x[1])\n\tfinal_list = []\n\n\ti = -1\n\twhile i > ((-1 * n) - 1):\n\t\tfinal_list.append(sorted_list[i])\n\t\ti -= 1\n\n\tlist_without_numbers = [x[0] for x in final_list]\n\n\treturn list_without_numbers" ]
[ "0.6768063", "0.6764637", "0.6511837", "0.635036", "0.63493806", "0.6346821", "0.6346821", "0.63431996", "0.6316743", "0.63167274", "0.6286206", "0.62472194", "0.62286603", "0.6213437", "0.62020063", "0.6191109", "0.61899865", "0.61860347", "0.6180162", "0.6180162", "0.6176259", "0.61722213", "0.61359334", "0.61225855", "0.61225855", "0.6098083", "0.60979414", "0.60729706", "0.60624605", "0.6049681", "0.6048686", "0.60475725", "0.6046158", "0.60246044", "0.60174644", "0.60013294", "0.6000457", "0.59998316", "0.59955466", "0.59923273", "0.59892344", "0.5969767", "0.596657", "0.5959944", "0.59557956", "0.5951418", "0.59405994", "0.5934734", "0.5932863", "0.5926085", "0.59214723", "0.5920301", "0.5917714", "0.5913851", "0.590594", "0.58918977", "0.58854383", "0.58790225", "0.5864389", "0.5860076", "0.58517164", "0.5851674", "0.5851407", "0.58354956", "0.58283573", "0.5827288", "0.5810358", "0.58049315", "0.5796204", "0.5772444", "0.5771998", "0.5771217", "0.5768908", "0.57659376", "0.57631934", "0.5760976", "0.57500166", "0.5746004", "0.57447046", "0.57378775", "0.57305944", "0.5727623", "0.5724954", "0.5721502", "0.571751", "0.57146734", "0.5714672", "0.571175", "0.5702237", "0.57001454", "0.5698038", "0.56899536", "0.56880283", "0.56874317", "0.56829447", "0.5682002", "0.5676862", "0.5672714", "0.56718314", "0.56710356" ]
0.6017455
35
return most used words
def get_words(self, first=10):
    return get_occurences(self.lemmatized_words)[:first]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def most_words(self, n):\n return big_tags", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)", "def get_most_informative_word(self, documents, vocabulary):\n most_informative_word = None\n most_informative_word_gain = 0\n for word in vocabulary:\n gain = self.get_information_gain(word, documents)\n if most_informative_word == None or gain >= most_informative_word_gain:\n most_informative_word = word\n most_informative_word_gain = gain\n return most_informative_word", "def most_repeating_word(words):\n return max(words, key=most_repeating_letter_count)", "def most_common_word(words, text):\n word_frequency = {w:text.count(w) for w in words}\n return sorted(words, key=word_frequency.get)[-1]", "def task2(dictionary):\n word_count = Counter(dictionary)\n ans = word_count.most_common(10)\n print(ans)\n return ans", "def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty", "def extract_frequent_words(records, num_words, no_counts=False):\r\n word_counts = FreqDist(records)\r\n frequent_words = word_counts.most_common(num_words)\r\n if no_counts:\r\n frequent_words = [word[0] for word in frequent_words]\r\n print(\"=====The {:d} Most Frequent Words=====\".format(num_words))\r\n print(frequent_words)\r\n return frequent_words", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def top_question_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['question']:\n w = Dictionary.normalize(w)\n if args.uncased_question:\n w = w.lower()\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def 
most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)", "def count_words(data, number_word_frequency_results=40):\n current_max_sentence_size = 0\n count_word_frequency = Counter()\n for entry in data:\n print (entry)\n terms_all = [term for term in entry]\n count_word_frequency.update(terms_all)\n return count_word_frequency.most_common(number_word_frequency_results)", "def find_largest_freq():\n words_list = {word for line in lines for word in line} # all words possible\n word_freqs = [(find_freq(word), word) for word in words_list] # list of tuples of words and their frequencies\n max_freq = max(word_freqs)\n return max_freq[0], max_freq[1]", "def count_word(self, most_num):\n with open(self.file_name, 'r') as f:\n data = f.read().lower()\n # characters and single quote not split\n words = re.split(r'[^\\w\\']+', data)\n logging.debug(words)\n most_cnts_words = Counter(words).most_common(most_num)\n print(most_cnts_words)", "def top_chars(phrase):\n phrase = phrase.split()\n letter_counts = {}\n\n # loops through phrase and adds word name to key with the length of the word. If no such key exists, it is created\n for word in phrase:\n for letter in word:\n if letter in letter_counts:\n letter_counts[letter] = letter_counts[letter] + 1\n else:\n letter_counts[letter] = 1\n\n most_used = []\n # loops through each key in the dictionary of usage counts and checks if it has the highest usage count.\n # if it does, it replaces the old elements in the list. 
If it is used as much as the currently most-used letter,\n # it is appended to the list.\n for key in letter_counts:\n if most_used == []:\n most_used.append(key)\n elif letter_counts[key] > letter_counts[most_used[0]]:\n most_used = [key]\n elif letter_counts[key] == letter_counts[most_used[0]]:\n most_used.append(key)\n\n return sorted(most_used)", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))", "def getTopKCounter(a, K):\n # r = []\n # for i in a:\n # r.extend(i)\n c = Counter(a)\n words = [i[0] for i in c.most_common(K)]\n return words", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()", "def count_words(self,top_only=True):\n if top_only:\n self.top_skill_list()\n else:\n self.all_skill_list()\n word_counts = Counter(self.skill_list)\n top_n = word_counts.most_common(len(word_counts))\n self.feature = []\n proportion = []\n for i in top_n:\n self.feature.append(i[0])\n proportion.append(i[1])\n self.coff = 1./(np.log(proportion)+1)\n return", "def create_most_freq_word_list(filename):\n global most_frequent_words\n\n if not most_frequent_words:\n with open(filename) as fp:\n for line in fp:\n most_frequent_words.append(re.sub(r'\\s+', '', line))", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def max_word_value(words):\n return max(words, key=calc_word_value)", "def find_long_and_common_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 7 and FreqDist(tokens)[word] > 7])", "def most_common_word_in_web_page(words, url, user_agent=requests):\n response = user_agent.get(url)\n return most_common_word(words, response.text)", "def keep_top_words(self, M, Mprint=20):\n freq = self.data.sum(axis=0)\n freq = np.squeeze(np.asarray(freq))\n idx = np.argsort(freq)[::-1]\n idx = idx[:M]\n self.keep_words(idx)\n print('most frequent words')\n for i in 
range(Mprint):\n print(' {:3d}: {:10s} {:6d} counts'.format(i, self.vocab[i], freq[idx][i]))\n return freq[idx]", "def getMaxKey(self):\n if len(self.word_to_freq) == 0:\n return \"\"\n\n tail = self.tail.prev\n while tail is not None:\n if len(tail.words) > 0:\n return next(iter(tail.words))\n else:\n tail = tail.prev\n\n return \"\"", "def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]", "def most_common(data_word):\n stop_words = set(stopwords.words(\"english\"))\n\n #filter out stop words\n data_filtered = [word for word in data_word if word not in stop_words]\n cnt = Counter(data_filtered)\n\n #count most common words\n common = cnt.most_common(100)\n return common", "def find_max_words(files):\n all_num_words = list()\n for file in files:\n num_words =[len(line.split()) for line in open(file, 'r')]\n all_num_words = all_num_words + num_words\n #print (all_num_words)\n return max(all_num_words)", "def most_words_and_longest(self, n):\n return big_tags", "def kMostFrequentWords(filename, k):\n wordcounts = countWords(filename)\n return wordcounts.most_common(k)", "def maxcompChooseWord(hand, wordList, n):\n # 电脑给出最优解\n point = 0\n maxword = ''\n for word in wordList:\n newword1 = copy.deepcopy(word)\n newword2 = copy.deepcopy(word)\n if isValidWord(newword1, hand, wordList):\n p = getWordScore(newword2, n)\n if p > point:\n point = p\n maxword = word\n if point == 0:\n return None\n else:\n return maxword, point", "def most_common_words(visual_fld, num_visualize):\n words = open(os.path.join(visual_fld, 'vocab.tsv'), 'r').readlines()[:num_visualize]\n words = [word for word in words]\n file = open(os.path.join(visual_fld, 'vocab_' + str(num_visualize) + '.tsv'), 'w')\n for word in words:\n file.write(word)\n file.close()", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]", "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist", "def comp_choose_word(hand, word_list):\n maxscore = 0\n maxword = \"\" \n for n in range(calculate_handlen(hand)):\n perms = get_perms(hand, n)\n for word in perms:\n wordscore = get_word_score(word, HAND_SIZE)\n if wordscore > maxscore:\n if word not in word_list:\n continue\n else:\n maxscore = wordscore\n maxword = word\n return maxword\n # TO DO...", "def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r", 
"def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)", "def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]", "def calculate_most_frequent_n_words(self, input_string: str, n: int) \\\n -> List[WordFrequencyStructure]:\n results = \\\n self._typed_sorted_result(input_string=input_string)\n\n return results[:n]", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]", "def most_influential_words(model, vectorizer, genre_index=0, num_words=10):\n features = vectorizer.get_feature_names()\n max_coef = sorted(enumerate(model.coef_[genre_index]), key=lambda x:x[1], reverse=True)\n return [[features[x[0]], x[1] ] for x in max_coef[:num_words]]", "def get_vocabulary_words_with_counts(txt, min_word_freq):\n\n data = txt.split()\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # keep words that occur more than min_word_freq\n top_count_pairs = [pair for pair in count_pairs if pair[1] > min_word_freq]\n return top_count_pairs", "def most_frequent(corpus):\n fd = nltk.FreqDist(corpus)\n return fd.most_common(10)", "def most_influential_words_doc(doc, tfidf_words):\n words_found = []\n for d in doc.split():\n for t in tfidf_words:\n if d == t[0]:\n if d not in words_found:\n words_found.append(d)\n return words_found", "def most_common(self):\n # Example ouput : ['so', 6]\n return list(sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)[0])\n #sorted = sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)\n #return sorted[0] #not list", "def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n 
nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def longest_word_length(words):", "def find_long_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 15])", "def most_wordy(data_sent):\n #initialize lists\n sylls = []\n words = []\n sents = []\n fkgs = []\n\n #looping through sentences to find lengthy sentences\n for sent in data_sent:\n token = word_tokenize(sent)\n word = len(token)\n if word > 40:\n\n #appending to lists\n syll = textstat.syllable_count(sent)\n sylls.append(syll)\n words.append(word)\n sents.append(sent)\n fkgs.append(fkg(int(word), 1, int(syll)))\n\n #transfer information to dataframe\n df_wordy = pd.DataFrame({'Words' : words,\n 'Syllables' : sylls,\n 'Flesch Kincaid Grade Level': fkgs,\n 'Sentence' : sents}, columns = [\"Words\", \"Syllables\", \"Flesch Kincaid Grade Level\", \"Sentence\"])\n df_wordy.sort_values(\"Words\", ascending = False, inplace = True)\n return df_wordy", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def get_top_tweet_ngrams(corpus, dim=2, n=None):\r\n vec = CountVectorizer(ngram_range=(dim, dim)).fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0) \r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)\r\n return words_freq[:n]", "def predict_currword_given_lastword(first_word, second_word, top_n=10):\r\n return Counter(\r\n {\r\n w: c\r\n for w, c in model.WORD_TUPLES_MODEL[first_word.lower()].items()\r\n if w.startswith(second_word.lower())\r\n }\r\n ).most_common(top_n)", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def get_most_probable_sentence(\n self,\n suggestions: List[List[str]]\n ) -> str:\n sent_word_count = len(suggestions)\n suggestions = [[tok] for tok in ContextModel.START_TOKENS] + suggestions + \\\n [[tok] for tok in ContextModel.END_TOKENS]\n memory = [[MemoryItem(score=0.0, decoded=tuple())], [MemoryItem(score=0.0, decoded=tuple())]]\n for t in range(2, len(suggestions)):\n 
memory.append([])\n for i, word in enumerate(suggestions[t]):\n mx_score, pick_1, pick_2 = 0, 0, 0\n for j, suggestion_1 in enumerate(suggestions[t - 1]):\n for k, suggestion_2 in enumerate(suggestions[t - 2]):\n curr_score = memory[-3][k].score \\\n + self.model_dict.get((suggestion_2, suggestion_1), self.default_prob) \\\n + self.model_dict.get((suggestion_1, word), self.default_prob) \\\n + self.model_dict.get((suggestion_2, word), self.default_prob)\n if curr_score > mx_score:\n mx_score, pick_1, pick_2 = curr_score, j, k\n memory_item = MemoryItem(score=mx_score, decoded=memory[-3][pick_2].decoded + (pick_2, pick_1,))\n memory[-1].append(memory_item)\n memory = memory[1:]\n\n decoded = ' '.join([suggestions[t][i] for t, i in enumerate(memory[-1][0].decoded[-sent_word_count:],\n start=2)])\n # score = memory[-1][0].score\n return decoded", "def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))", "def most_similar(self, words: [str], top_n=3, metric='cosine') -> [(str, float)]:\n if len(words) == 0:\n return []\n\n vec = self.mean(words)\n if numpy.count_nonzero(vec) == 0:\n return []\n\n return [w for w, sim in self.most_similar_vec(vec=vec, top_n=top_n, exclude_words=words, metric=metric)]", "def find_most_anagrams_from_wordlist(wordlist):\n # max_count = 1\n # count = 1\n # letters_dict = {}\n # most_anagrams_word = \"\"\n # letters_dict_list = []\n # for word in wordlist:\n # for letter in word:\n # letters_dict[letter] = letters_dict.get('letter', 0) + 1\n # letters_dict_list.append(sorted(list(letters_dict.items())))\n # letters_dict = {}\n\n # cutting_wordlist = wordlist[:]\n # to_cut_idx = []\n # seen = set()\n # while wordlist:\n # word = wordlist.pop(0)\n # cutting_wordlist.pop(0)\n # letters_dict = letters_dict_list.pop(0)\n # seen.add(word)\n # for idx, next_word in enumerate(wordlist):\n # if next_word in seen:\n # to_cut_idx.append(idx)\n # cutting_wordlist.remove(next_word)\n # elif len(next_word) == len(word):\n # if letters_dict_list[idx] == letters_dict:\n # seen.add(next_word)\n # to_cut_idx.append(idx)\n # count += 1\n # cutting_wordlist.remove(next_word)\n\n # if count > max_count:\n # most_anagrams_word = word\n # max_count = count\n # wordlist = cutting_wordlist[:]\n # if to_cut_idx:\n # for idx in reversed(to_cut_idx):\n # letters_dict_list.pop(idx)\n # # if word == 'angor':\n # # print('COUNT: ', count)\n # count = 1\n # to_cut_idx = []\n # print('MAX: ', max_count)\n # return most_anagrams_word\n \n combo_dict = {}\n for word in wordlist:\n sorted_chars = ''.join(sorted(word))\n if combo_dict.get(sorted_chars):\n combo_dict[sorted_chars].append(word)\n else:\n combo_dict[sorted_chars] = [word]\n\n max_count = 0\n word_thats_max = ''\n for key in combo_dict:\n if len(combo_dict[key]) > max_count:\n max_count = len(combo_dict[key])\n word_thats_max = combo_dict[key][0]\n\n return word_thats_max", "def most_common_words(self,\r\n words,\r\n number=10,\r\n dictionaryobject=None,\r\n reverse=False):\r\n\r\n if not dictionaryobject:\r\n dictionaryobject = self.word_dict\r\n\r\n temp_words = how_common(entrylist=words,\r\n dictionaryobject=dictionaryobject,\r\n display=display)\r\n number = min([number,len(temp_words)])\r\n\r\n\r\n\r\n if not reverse:\r\n temp_words = temp_words[0:number]\r\n else:\r\n temp_words = temp_words[len(temp_words)-number:len(temp_words)]\r\n\r\n return [x_temp[0] for x_temp in temp_words]", "def used_words(self):\n return self._used_words", "def get_related_words(initial_words, model):\n \n unseen = 
initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen", "def get_related_words(initial_words, model):\n \n unseen = initial_words\n \n seen = defaultdict(int)\n \n max_size = 1000 # could be greater\n \n while unseen and len(seen) < max_size:\n if len(seen) % 50 == 0: \n print('seen length : {}'.format(len(seen)))\n \n node = unseen.pop(0)\n \n new_expanding = [w for w, s in model.most_similar(node, topn=20)]\n \n unseen += new_expanding\n \n seen[node] += 1\n \n # optimal: 1. score function could be revised\n # optimal: 2. using dymanic programming to reduce computing time\n \n return seen", "def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]", "def print_most_common(hist, num=10):\n t = most_common(hist)\n print 'The most common words are:'\n for freq, word in t[:num]:\n print word, '\\t', freq", "def most_similar_word(self,word, word_set):\n\t max_sim = -1.0\n\t sim_word = \"\"\n\t for ref_word in word_set:\n\t sim = self.word_similarity(word, ref_word)\n\t if sim > max_sim:\n\t max_sim = sim\n\t sim_word = ref_word\n\t return sim_word, max_sim", "def count_words(path, max_vocab_size=40000, tok=False):\n counts = collections.Counter()\n for words in read_file(path, tok):\n for word in words:\n counts[word] += 1\n\n vocab = [word for (word, _) in counts.most_common(max_vocab_size)]\n return vocab", "def extract_frequent_words(df:pd.DataFrame):\n x = (pd.pivot_table(df.drop(['text', 'percent_correct'], axis=1),\n index='success_lvl',\n aggfunc=['sum', 'mean']) # Count shows ~50/50 split\n .transpose()\n .loc[:, ['high', 'low']]\n .unstack(level=0))\n\n # Rank the most frequent phrases\n x['high_rank'] = x[('high', 'sum')].rank(method='dense', ascending=False)\n x['low_rank'] = x[('low', 'sum')].rank(method='dense', ascending=False)\n print(x[x.high_rank <= 10.].sort_values('high_rank'))\n print(x[x.low_rank <= 10.].sort_values('low_rank'))", "def commonWords(self):\n #utilize similar code used in stats.py\n exclude = set(('!', '.', '?'))\n freq = Stats()\n fullText = []\n #Parse email\n for x in range(self.getSCount()):\n #Simplify email into string of words separated by single space\n sString = self[x].lower()\n sString = ''.join(char for char in sString if char not in exclude)\n sString = sString.split()\n fullText = fullText + sString\n\n #Call findFreqDic() to find frequencies of words\n freqDict = freq.findFreqDic(fullText)\n\n #Analyze 10 words\n numTopic = 10\n \n #Find most and least common calling topNSort and bottomNSort\n mostCommon = freq.topNSort(freqDict, numTopic)\n leastCommon = freq.bottomNSort(freqDict, numTopic)\n \n most = list(mostCommon.keys())\n least = list(leastCommon.keys())\n \n return most, least", "def print_most_frequent(ngrams, num=10):\r\n for n in sorted(ngrams):\r\n print('----- {} most common {}-grams -----'.format(num, n))\r\n for gram, count in ngrams[n].most_common(num):\r\n print('{0}: {1}'.format(' '.join(gram), count))\r\n print('')", "def get_mostFrequent(self, n=5):\r\n pass", 
"def get_mostFrequent(self, n=5):\r\n pass", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def count_tokens(txt_to_tokenise_and_count):\n\n txt_tokens = nltk.word_tokenize(txt_to_tokenise_and_count) \n counts = Counter(txt_tokens)\n\n print (counts.most_common(10))\n return counts", "def most_repeating_letter_count(word):\n return Counter(word.lower()).most_common(1)[0][1]", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def _find_top_idf_words(company_names):\n feature_as_list = remove_special_chars(company_names)\n feature_as_list = [x.lower().strip() for x in feature_as_list]\n feature_as_list = set(feature_as_list)\n features = get_top_idf_features(feature_as_list, 100, 1)\n print(features)\n return features", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1", "def _get_highest_nperms(n, words, dawg):\n best = (None, 0, []) # word, num perms, nperms\n\n words_counted = 0\n total_perms_counted = 0\n\n for word in words:\n words_counted += 1\n # Generate all n-length permutations of word\n nperms = [w for w in dawg.gen_completions('', word)]\n total_perms_counted += len(nperms)\n\n nperms = [w for w in nperms if len(w) == n]\n\n for nperm in nperms:\n try:\n words.remove(nperm)\n except ValueError:\n pass\n\n nperms = list(set(nperms))\n\n if len(nperms) > best[1]:\n best = (word, len(nperms), nperms)\n\n print(f'words permuted: {words_counted}')\n print(f'total permutations: {total_perms_counted}')\n return best", "def find_longest_word(words):\n \n # lengths = []\n\n # for word in words: \n # l = len(word)\n # lengths.append(l)\n # result = max(lengths)\n # return result\n\n #ALT SOLUTION ONE LINE \n return len(max(words, key=lambda words: 
len(words)))", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def mostFrequentNGram(text,n):\n\n ngram = calculateNGrams(text,n)\n return counterFrequency(ngram)", "def min_top_word(file_reader_last_read_list):\n min_word = \"|||\"\n for file_reader_and_last_read in file_reader_last_read_list:\n if file_reader_and_last_read[\"last_read\"][\"word\"] < min_word and file_reader_and_last_read[\"last_read\"][\"word\"]\\\n != \"\":\n min_word = file_reader_and_last_read[\"last_read\"][\"word\"]\n return min_word", "def max_wupa(context_sentence, ambiguous_word):\r\n\r\n result = {}\r\n for i in wn.synsets(ambiguous_word):\r\n result[i] = sum(max([i.wup_similarity(k) for k in wn.synsets(j)]+[0]) \\\r\n for j in word_tokenize(context_sentence))\r\n result = sorted([(v,k) for k,v in result.items()],reverse=True)\r\n return result", "def find_frequent_words(word_frequencies, amount=50):\n alphabetically_sorted = sorted(word_frequencies.most_common(amount), key=lambda tup: tup[0])\n final_sorted = sorted(alphabetically_sorted, key=lambda tup: tup[1], reverse=True)\n list1 = [i[0] for i in final_sorted]\n\n list2 = [i[1] for i in final_sorted]\n return list1, list2" ]
[ "0.7717855", "0.7710902", "0.7526036", "0.7515229", "0.7457172", "0.7383764", "0.71464837", "0.71408725", "0.71277374", "0.7127556", "0.7100289", "0.70912224", "0.70876014", "0.7072795", "0.70574975", "0.70387983", "0.6993421", "0.6988404", "0.69723606", "0.6950494", "0.6948689", "0.6941304", "0.6938613", "0.6916473", "0.6911338", "0.69092673", "0.6907776", "0.69067895", "0.6884123", "0.68808264", "0.68760693", "0.6875248", "0.6847212", "0.68240714", "0.6786884", "0.6784537", "0.6763565", "0.67565453", "0.6752421", "0.6748435", "0.6727024", "0.6688584", "0.66842246", "0.6678287", "0.6676216", "0.6655041", "0.6650574", "0.6644075", "0.66411537", "0.6640355", "0.6631318", "0.6629226", "0.6621396", "0.65978765", "0.6588355", "0.65843374", "0.6580017", "0.6578872", "0.6576622", "0.6576622", "0.65704465", "0.6561874", "0.6525222", "0.6509021", "0.6501351", "0.6495364", "0.6493747", "0.6493345", "0.6488025", "0.64842117", "0.64761704", "0.6471122", "0.64685726", "0.64500433", "0.64445585", "0.64445585", "0.6432251", "0.64283395", "0.6427238", "0.6423359", "0.6422808", "0.6420062", "0.6414825", "0.6410865", "0.6410865", "0.6409801", "0.6409801", "0.6405704", "0.63924015", "0.6377379", "0.63757807", "0.63724154", "0.63714933", "0.636656", "0.6362562", "0.63620347", "0.636092", "0.63541627", "0.6354008", "0.63532686", "0.63531977" ]
0.0
-1
lemmatize words in noun_phrases, exclude phrases with `STOPWORDS`
def lemmatized_phrases(self):
    phrases = [set(lower_words(TextBlob(p).words.lemmatize())) for p in self.blob.noun_phrases]
    return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]
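A minimal, self-contained sketch of the same idea, assuming TextBlob is installed with its corpora downloaded; the original's `lower_words`, `STOPWORDS`, and `self.blob` are not shown here, so lowercasing is inlined and a tiny illustrative stopword set is assumed.

from textblob import TextBlob

STOPWORDS = {"the", "a", "an", "and", "of", "with"}  # assumed stopword set, for illustration only

def lemmatized_phrases(text):
    """Lemmatize each noun phrase; drop any phrase containing a stopword."""
    phrases = [set(w.lower() for w in TextBlob(p).words.lemmatize())
               for p in TextBlob(text).noun_phrases]
    return [" ".join(p) for p in phrases if not STOPWORDS.intersection(p)]

print(lemmatized_phrases("Grey wolves chase wild rabbits across open fields"))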
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self", "def lemmatize_nouns(self, words):\n\t\tlemmatizer = WordNetLemmatizer()\n\t\tlemmas = []\n\t\tfor word in words:\n\t\t\tlemma = lemmatizer.lemmatize(word, pos='n')\n\t\t\tlemmas.append(lemma)\n\t\treturn lemmas", "def clean_stopwords_lemmatize(text):\n tokens = clean_stopwords(text)\n tokens = lemmatize_tokens(tokens)\n # count = Counter(tokens)\n # c = count.most_common(15)\n # b = [str(i[0]) for i in c]\n # keywords = [t for t in tokens if t in b]\n news = ['ESPN', 'espn', 'foxsports', 'fox', 'cnn', 'yahoo', '•', '-', '●']\n keywords = [k for k in tokens if not k in news]\n return keywords", "def lemmatiser(list_of_words, tag):\n \n output = []\n for entry in list_of_words:\n if phrases:\n # just get the rightmost word\n word = entry[-1]\n entry.pop()\n else:\n word = entry\n if translated_option.startswith('u'):\n if word in taglemma:\n word = taglemma[word]\n else:\n if word == 'x':\n word = 'Other'\n # only use wordnet lemmatiser when appropriate\n elif not dependency:\n if word in wordlist:\n word = wordlist[word]\n word = lmtzr.lemmatize(word, tag)\n # do the manual_lemmatisation\n else:\n if word in wordlist:\n word = wordlist[word]\n if phrases:\n entry.append(word)\n output.append(entry)\n else:\n output.append(word)\n return output", "def lemmatiser(list_of_words, tag):\n output = []\n for word in list_of_words:\n if translated_option.startswith('u'):\n word = taglemma.get(word.lower(), 'Other')\n else:\n word = wordlist.get(word, lmtzr.lemmatize(word, tag))\n if not preserve_case:\n word = word.lower()\n output.append(word)\n return output", "def basic_clean(text):\n wnl = nltk.stem.WordNetLemmatizer()\n stopwords = stopwords.words('english') + ADDITIONAL_STOPWORDS\n text = (unicodedata.normalize('NFKD', text)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n .lower())\n words = re.sub(r'[^\\w\\s]', '', text).split()\n return [wnl.lemmatize(word) for word in words if word not in stopwords]", "def _lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def normalize_words(document):\n stopwords = set(nltk.corpus.stopwords.words('english'))\n lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()\n for token in document:\n token = token.lower()\n if token in string.punctuation: continue\n if token in stopwords: continue\n yield lemmatizer.lemmatize(token)", "def test_lemmatization():\n normalizer = TextNormalizer(stopwords=False, lemmatize=True)\n X = normalizer.transform([[\"start running better old friend\"]])\n assert X[\"corpus\"][0] == [\"start\", \"run\", \"well\", \"old\", \"friend\"]", "def lemmatize_verbs(self, words):\n\t\tlemmatizer = WordNetLemmatizer()\n\t\tlemmas = []\n\t\tfor word in words:\n\t\t\tlemma = lemmatizer.lemmatize(word, pos='v')\n\t\t\tlemmas.append(lemma)\n\t\treturn lemmas", "def lemmatisation(tokens):\n pos_tag = nltk.pos_tag(tokens)\n lemmatiser = nltk.WordNetLemmatizer()\n wornet_tags = {\"J\": wordnet.ADJ, \"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"R\": wordnet.ADV}\n words = []\n for word, tag in pos_tag:\n proper_tag = wornet_tags.get(tag[0].upper(), wordnet.NOUN)\n words.append(lemmatiser.lemmatize(word, proper_tag))\n return words", "def lemmatize(text, nlp):\n\n return [word.lemma_ for word in 
nlp(text)]", "def get_keywords(text):\n tokens = [word.lower() for word in word_tokenize(text)]\n\n # tag words as verb, noun etc\n tagged_words = pos_tag(tokens)\n\n # retrieve list of boring words from file\n stopwords_file = os.path.join(BASE_DIR, 'data', 'stopwords.txt')\n with open(stopwords_file, 'r', encoding='utf-8') as f:\n stopwords = [line.rstrip(linesep) for line in f]\n \n #We don't want keywords to contain anything in this list\n forbidden = ['.',',',';',':','?','!','+',')','(','[',']','/','<','>','\"','©','1','2','3','4','5','6','7','8','9','0']\n\n # NLTK Chunking - detects noun phrases and phrases of form verb noun or adj noun\n patterns = \"\"\"NP: {<JJ>*<NN><NNS>}\n {<JJR><NNS>}\n {<JJ>*<NNS>}\n {<NN><NNS>} \n {<JJ><NNS>}\n {<JJ>*<NN>*}\n {<NN>*}\n {<NNS>*}\"\"\"\n chunker = RegexpParser(patterns)\n chunks = chunker.parse(tagged_words)\n\n #these are the phrases we want, as lists within a list\n validphrases = []\n for t in chunks.subtrees():\n if t.label() == 'NP':\n validphrases.append([x for x,y in t.leaves()])\n\n #turning lists within lists into actual noun phrases i.e [[radiation], [breast,cancer]] becomes [radiation, breast cancer]\n #sorry for my horrible code\n #trees suck\n lemmatizables = []\n for sublist in validphrases:\n lemmatizables.append(' '.join(sublist))\n\n lemmatizer = WordNetLemmatizer()\n lems = [lemmatizer.lemmatize(x) for x in lemmatizables]\n\n #removing stopwords after lemmatizinga, then removing anything containing punctuation or a number\n lems = filter(lambda lem: lem not in stopwords, lems)\n lems = filter(lambda lem: not any(char in lem for char in forbidden), lems)\n\n return tuple(lems)", "def lemmatize_verbs(words):\r\n lemmatizer = WordNetLemmatizer()\r\n lemmas = []\r\n for word in words:\r\n lemma = lemmatizer.lemmatize(word, pos='v')\r\n lemmas.append(lemma)\r\n return lemmas", "def lemmatize_text(text):\n text = nlp(text)\n text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])\n return text", "def lemmatization(texts):\r\n texts_out = []\r\n for sent in texts:\r\n x=analyzer.analyze(sent)[0][0]\r\n if (x.pos==\"Unk\"):\r\n texts_out.append(analyzer.lemmatize(sent)[0][1][0])\r\n else:\r\n texts_out.append(x.lemma)\r\n return texts_out", "def lemmatizeVerbs(self, words):\n\t\tlemmas = [self.lemmatizer.lemmatize(word, pos='v') for word in words]\n\t\treturn lemmas", "def lemmatizeWords(self, words):\n\t\tlemmas = [self.lemmatizer.lemmatize(word) for word in words]\n\t\treturn lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def lemmatize_verbs(words):\n lemmatizer = WordNetLemmatizer()\n lemmas = []\n for word in words:\n 
lemma = lemmatizer.lemmatize(word, pos='v')\n lemmas.append(lemma)\n return lemmas", "def preprocessSentence(sentence):\n tokenizedSentence = tokenize.word_tokenize(sentence.lower())\n lemmatized = [lemma.lemmatize(token) for token in tokenizedSentence]\n\n noStopwords = [lemma for lemma in lemmatized\n if lemma not in englishStopwords\n and len(lemma) > 2\n and lemma.count(\"'\") != 1]\n noOddChars = [re.sub('[^\\w\\s]','',word) for word in noStopwords]\n return noOddChars", "def lem_and_stem(text, stopwords):\n\n\tlemmatizer = WordNetLemmatizer()\n\tstemmer = PorterStemmer()\n\tprocessed_text = []\n\tfor token, pos in text:\n\t\tpos = map_pos_tag(pos)\n\t\tif not (pos == wn.NOUN):\n\t\t\tcontinue\n\t\tif token not in stopwords and len(token) > 3:\n\t\t\tprocessed_token = stemmer.stem(lemmatizer.lemmatize(token, pos=pos))\n\t\t\tif processed_token not in stopwords:\n\t\t\t\tprocessed_text.append(processed_token)\n\treturn processed_text", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n nlp = spacy.load('en', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word).lower() for word in query]\n return \" \".join(wordlist)", "def preprocess_sentence(sentence):\n def _wordnet_pos(tag):\n if tag.startswith('J'):\n return wordnet.ADJ\n elif tag.startswith('V'):\n return wordnet.VERB\n elif tag.startswith('R'):\n return wordnet.ADV\n else: \n # Default for WordNet is NOUN\n return wordnet.NOUN\n\n stop = stopwords.words(\"english\") + list(string.punctuation)\n\n sentence = sentence.lower()\n words = [word for word in word_tokenize(sentence) if word not in stop]\n tagged_words = pos_tag(words)\n return ' '.join([ \n lemmatizer.lemmatize(word, _wordnet_pos(tag)) for word,tag in tagged_words \n ])", "def lemmatize_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)", "def lemmatize(query):\n wordlist = [wnl.lemmatize(word) for word in query.split()]\n return \" \".join(wordlist)", "def lemmatization(tokenized_word_list):\n porter=nltk.stem.PorterStemmer()\n filtered_tokens = [porter.stem(word) for word in tokenized_word_list]\n return filtered_tokens", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en', disable=['parser', 'ner'])\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent))\r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n # remove stopwords once more after lemmatization\r\n texts_out = [[word for 
word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out]\r\n return texts_out", "def lemma_texts(raw_texts):\r\n tokens = pos_tag(word_tokenize(raw_texts))\r\n res = [lemmatize(pos_res[0], get_wordnet_pos(pos_res[1])) for pos_res in tokens]\r\n return \" \".join(res)", "def interestingLemmas(self):\n lemmas = set([])\n for token in self.importantTokenList():\n if token.isStopWord() == False:\n lemmas.add(token.lemma) \n return lemmas", "def lemmatize_text(self, text, print_tokens=False):\n # text = text.replace(\"/\", ' or ')\n # text = text.replace(\"\\\\\", ' or ')\n # # text = text.replace(\"'s\", '')\n # # text = text.replace(\"’s\", '')\n\n if print_tokens:\n print(pos_tag(word_tokenize(text)))\n\n # text = \"We’re looking for an exceptional Deep Learning (DL) Engineer\" # TODO: remove\n for word, tag in pos_tag(word_tokenize(text)):\n if tag.startswith('NN'): # NOUN\n # NN noun, singular ‘desk’, ’dog’\n # NNS noun plural ‘desks’, ‘dogs’\n # NNP proper noun, singular ‘Harrison’\n # NNPS proper noun, plural ‘Americans’\n yield self.wnl.lemmatize(word, pos='n') # wordnet.NOUN\n elif tag.startswith('VB'): # VERB\n # VB verb, base form take\n # VBD verb, past tense took\n # VBG verb, gerund/present participle taking\n # VBN verb, past participle taken\n # VBP verb, sing. present, non-3d take\n # VBZ verb, 3rd person sing. present takes\n yield self.wnl.lemmatize(word, pos='v') # wordnet.VERB\n elif tag.startswith('JJ'): # ADJ\n # JJ adjective ‘big’, ’good’\n # JJR adjective, comparative ‘bigger’, ‘better’\n # JJS adjective, superlative ‘biggest’\n yield self.wnl.lemmatize(word, pos='a') # wordnet.ADJ\n elif tag.startswith('RB'): # ADV\n # RB adverb very, silently,\n # RBR adverb, comparative better\n # RBS adverb, superlative best\n yield self.wnl.lemmatize(word, pos='r') # wordnet.ADV\n else:\n yield word", "def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()", "def lemmatized_words(tokens):\n lemmas = []\n for word in tokens:\n lemma = wn.morphy(word)\n if lemma:\n lemmas.append(lemma)\n else:\n lemmas.append(word)\n return lemmas", "def apply_lemmatize(tokens, wnl=WordNetLemmatizer()):\n return [wnl.lemmatize(token) for token in tokens]", "def aux_lemma(word):\n if re.match(r\"(does|did|doing)\", word):\n return (\"do\")\n elif re.match(r\"(had|has|'ve|having)\", word):\n return (\"have\")\n elif re.match(r\"(is|are|am|was|were|been|'s|being)\", word):\n return (\"be\")\n elif word == (\"'d\"):\n return (\"would\")\n else:\n return word.lower()", "def text_process(mess):\n nopunc= [char for char in mess if char not in string.punctuation]\n nopunc=''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english') and len(word)>2]", "def pron_lemma(word):\n if re.match(r\"(their|theirs|them|themselves)\", word):\n return (\"they\")\n elif re.match(r\"(his|him|himself)\", word):\n return (\"he\")\n elif re.match(r\"(her|hers|herself)\", word):\n return (\"she\")\n elif re.match(r\"(its|itself)\", word):\n return (\"it\")\n elif re.match(r\"(your|yours|yourself)\", word):\n return (\"you\")\n elif re.match(r\"(our|us|ours)\", word):\n return (\"we\")\n elif re.match(r\"(me|mine|my|myself)\", word):\n return (\"I\")\n elif word == (\"I\"):\n return word\n else:\n return 
word.lower()", "def test_issue4104(en_lookup_nlp):\n words = [\"dry\", \"spun\", \"spun-dry\"]\n doc = Doc(en_lookup_nlp.vocab, words=words)\n lemmatizer = en_lookup_nlp.get_pipe(\"lemmatizer\")\n doc = lemmatizer(doc)\n assert [token.lemma_ for token in doc] == [\"dry\", \"spin\", \"spin-dry\"]", "def process_query(raw_query: str) -> [str]:\n query_tokens = word_tokenize(raw_query)\n\n return [LEMMATIZER.lemmatize(token.lower()) for token in query_tokens\n if LEMMATIZER.lemmatize(token.lower()) not in STOP_WORDS]", "def wrk_words_wt_no(sent):\n\n words=word_tokenize(basic_cleaning2(sent).lower())\n lemmatizer = WordNetLemmatizer()\n try:\n arr=[]\n for i in range(len(words)):\n arr.append(morph.parse(words[i])[0].normal_form)\n arr2 = []\n for i in arr:\n arr2.append(lemmatizer.lemmatize(i, pos='v'))\n arr3 = []\n for i in arr2:\n arr3.append(lemmatizer.lemmatize(i, pos='n'))\n arr4 = []\n for i in arr3:\n arr4.append(lemmatizer.lemmatize(i, pos='a'))\n words1=[w for w in arr4 if w not in stop]\n# words1=[w for w in arr4 if w not in english_stops]\n words1=No_with_word(words1)\n return words1\n except TypeError:\n pass", "def lemmatize_words(text: str, lemmatizer=WordNetLemmatizer()) -> str:\n return ' '.join(lemmatizer.lemmatize(word) for word in text.split())", "def lemmatize_adjectives(self, words):\n\t\tlemmatizer = WordNetLemmatizer()\n\t\tlemmas = []\n\t\tfor word in words:\n\t\t\tlemma = lemmatizer.lemmatize(word, pos='a')\n\t\t\tlemmas.append(lemma)\n\t\treturn lemmas", "def get_nouns(self):\n\t\tblob = TextBlob(self.raw_string)\n\n\t\tfor word, tag in blob.tags:\n\t\t\tif tag in ['NNP', 'NN']:\n\t\t\t\tself.nouns.append(word.lemmatize())", "def lemmatize(text):\n\n lem = WordNetLemmatizer()\n return ' '.join(list(map(lambda x: lem.lemmatize(x, 'v'),\n text.split())))", "def tokenize(text):\n tokens = word_tokenize(text)\n words = [token for token in tokens if re.match(\"[a-zA-Z0-9]\", token)]\n no_stopwords = [word for word in words if word not in stopwords.words(\"english\")]\n lowercase_words = [word.lower() for word in no_stopwords]\n pos_tagged_words = pos_tag(lowercase_words)\n lemmatized_words = [WordNetLemmatizer().lemmatize(word, pos=convert_pos_tag(pos)) for word, pos in pos_tagged_words]\n return lemmatized_words", "def lemmatize_string(doc, stop_words=STOP_WORDS):\n\n if not stop_words:\n stop_words = []\n\n # remove unicode\n clean_doc = \"\".join([char for char in doc if char in printable])\n\n # Run the doc through spaCy\n doc = nlp(clean_doc)\n\n # Lemmatize and lower text\n tokens = [re.sub(\"\\W+\",\"\",token.lemma_.lower()) for token in doc ]\n tokens = [t for t in tokens if len(t) > 1]\n\n return ' '.join(w for w in tokens if w not in stop_words)", "def lemmatize(text):\n word_tokens = nltk.word_tokenize(text)\n lemmatized_word = [wordnet_lemmatizer.lemmatize(word) for word in word_tokens]\n return (\" \".join(lemmatized_word))", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def tokenize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n s = stopwords.words('english')\n result = []\n for token in clean_tokens:\n if token not in s:\n result.append(token)\n\n return result", "def lemmatize_tagged_words(tagged_words):\n wordnet_lemmatizer = 
WordNetLemmatizer()\n lemmed_words = []\n for token in tagged_words:\n tokenStr = str(token[1])\n word_pos = get_wordnet_pos(tokenStr)\n lemmed_words.append(wordnet_lemmatizer.lemmatize(token[0], word_pos))\n\n return lemmed_words", "def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw", "def get_words_in(body):\n prepared_text = _prepare_text(body)\n tokens = nltk.word_tokenize(prepared_text)\n tags = nltk.pos_tag(tokens)\n lemmatized = [_lemmatize(tag) for tag in tags]\n no_stop = [word for word in lemmatized if word not in _stopwords]\n return no_stop", "def negatize_nouns(self, sent_target, max_synset_len=3):\n candidates = {}\n flatten = lambda l: [item for sublist in l for item in sublist]\n for w in sent_target[\"nouns\"]:\n candidates[w] = [(w, self.evaluator.value_evaluation(w))]\n synsets = Word(w).get_synsets(pos=NOUN)[:max_synset_len]\n upper_meanings = []\n for ss in synsets:\n hype = flatten([h.lemmas() for h in ss.hypernyms()])\n hypo = flatten([h.lemmas() for h in ss.hyponyms()])\n upper_meanings += hype\n upper_meanings += hypo\n upper_meanings += flatten([u.antonyms() for u in upper_meanings])\n upper_meanings = list(set(upper_meanings))\n for l in upper_meanings:\n val = self.evaluator.value_evaluation(l.name().lower())\n candidates[w].append((l.name().lower(), val))\n return candidates", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' I ', \" ? 
\", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text", "def lemmatize(text: Iterable[Iterable[str]]) -> Generator[str, None, None]:\n tag_dict = {\n \"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"R\": wordnet.ADV,\n \"V\": wordnet.VERB,\n }\n lemmatizer = WordNetLemmatizer()\n # Tags the words with their type/pos (part of speech)\n # Then translates the tags using `tag_dict`\n # And finally lemmatizes the words if possible\n\n return (\n lemmatizer.lemmatize(word, tag_dict[tag[0]]) if tag[0] in tag_dict else word\n for sentence in pos_tag_sents(text) # Outer loop\n for word, tag in sentence # Inner loop\n )", "def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words", "def process_words(texts, bigram_mod,trigram_mod,stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en_core_web_sm')\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent)) \r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \r\n return texts_out", "def lemmatizeWord(self, lst):\n lemmatized_list = []\n for item in lst:\n lemmatized_list.append(self.lmtzr.lemmatize(item))\n return lemmatized_list", "def lemmatisation(self, \n text: str\n ) -> Union[str, List[str]]:\n lemmatiser = WordNetLemmatizer()\n\n def lemma_sans_kw(w: str\n ) -> str:\n return (\n lemmatiser.lemmatize(w) if w not in self.target_words else w\n )\n \n if not self.tokenise:\n return ' '.join(\n lemma_sans_kw(w) for w in word_tokenize(text)\n )\n return [lemma_sans_kw(w) for w in text]", "def lemmatize_words(records):\r\n print('Length of tagged_records: {:d}'.format(len(records)))\r\n print('Total number of words: {:d}'.format(sum([len(record) for record in records])))\r\n tagged_records = map(lambda record: pos_tag(record), records)\r\n tagged_records = filter_stopwords(tagged_records)\r\n lemmatizer = WordNetLemmatizer()\r\n lemmatized_records = list()\r\n for record in tagged_records:\r\n try:\r\n lemmatized_record = list(map(lambda 
word: lemmatizer.lemmatize(word[0], POS_TRANSLATOR[word[1]]), record))\r\n except Exception as err:\r\n print(record)\r\n raise err\r\n lemmatized_records.append(lemmatized_record)\r\n print('Total number of words after filtering: {:d}'.format(len(lemmatized_records)))\r\n return lemmatized_records", "def test_stopwords():\n assert TextNormalizer().transform([[\"a b\"]])[\"corpus\"][0] == [\"b\"]", "def tokenize(phrase_str):\n phrase = phrase_str.split(' ') # tokenize string by space character\n\n mapping = str.maketrans('', '', string.punctuation)\n\n # remove punctuation, remove non-alphabetic tokens, stem tokens\n phrase = [WordNetLemmatizer().lemmatize(token.translate(mapping)) for token in phrase \\\n if token.translate(mapping).isalpha()]\n\n return phrase", "def lemmatize(self, sentence):\n porter_stemmer = PorterStemmer()\n return ' '.join(porter_stemmer.stem(str(w)) for w in sentence.lower().split())", "def preprocess_query(query):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n # for i in range(len(query)):\r\n query = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(query)) if tag in tags]\r\n query = [wordnet_lemmatizer.lemmatize(w, t) for (w, t) in query ]\r\n return query", "def part_lemma(word):\n if word == (\"n't\"):\n return (\"not\")\n else:\n return word.lower()", "def petit_nettoyage(ligne, lem_v=True, lem_n=True, len_elt=2, stopw=[]):\n lemmatizer = WordNetLemmatizer()\n for elt in ligne:\n if elt in (string.punctuation + string.digits):\n ligne = ligne.replace(elt, \" \")\n if lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if lemmatizer.lemmatize(elt, pos=\"v\") not in stopw\n ]\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in liste\n if len(lemmatizer.lemmatize(elt, pos=\"n\")) > len_elt\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"v\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"v\") not in stopw)\n and (len(elt) > len_elt)\n ]\n elif lem_v and lem_n:\n liste = [\n lemmatizer.lemmatize(elt, pos=\"n\")\n for elt in ligne.split()\n if (lemmatizer.lemmatize(elt, pos=\"n\") not in stopw)\n and (len(elt) > len_elt)\n ]\n else:\n liste = [\n elt\n for elt in ligne.split()\n if (elt not in stopw) and (len(elt) > len_elt)\n ]\n ligne = \" \".join(liste)\n return ligne", "def lemitization(text_vector):\n\n text_vector = postag_doc(text_vector)\n global lemmatizer\n tokenised_document = [lemmatizer.lemmatize(word, pos=map_postags(\n postag)) for word, postag in text_vector]\n return tokenised_document", "def lemmatize(token, tag):\n tag = {\n 'N': wn.NOUN,\n 'V': wn.VERB,\n 'R': wn.ADV,\n 'J': wn.ADJ\n }.get(tag[0], wn.NOUN)\n\n return WordNetLemmatizer().lemmatize(token, tag)", "def filter_pos(self):\n all_tokens = []\n for zettel in self.lemma_tokens:\n tokens = []\n for word in zettel:\n if word[1] in ['NN', 'NNS', 'NNP', 'NNPS', 'NG']: # NG = n_gram\n tokens.append(word)\n all_tokens.append(tokens)\n self.lemma_tokens = all_tokens", "def token_by_lemma(text):\n lemmatizer = WordNetLemmatizer()\n word_list = word_tokenize(text)\n\n lemmatized_wrds = [lemmatizer.lemmatize(w) for w in word_list]\n return lemmatized_wrds", "def lemmatize_tokens(tokens):\n lemmatized = []\n wordnet_lemmatizer = WordNetLemmatizer()\n for token in tokens:\n lemmatized.append(wordnet_lemmatizer.lemmatize(token, pos='v'))\n return 
lemmatized", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def lemmatize(data: pd.Series) -> pd.Series:\n lemmatizer = WordNetLemmatizer()\n return data.apply(lambda row: re.sub(\n r'\\b\\w+\\b', lambda match: lemmatizer.lemmatize(\n match.group(), pos=to_pos([match.group()])), row))", "def normalise(word):\n word = word.lower()\n word = stemmer.stem_word(word)\n word = lemmatizer.lemmatize(word)\n return word", "def lemmatize_docs(docs):\r\n wordnet_lemmatizer = WordNetLemmatizer()\r\n for i in range(len(docs)):\r\n docs[i] = [wordnet_lemmatizer.lemmatize(w, t) for (w, t) in docs[i]]\r\n return docs", "def linguistic(self, tokens: list):\n tokens_modified = list()\n\n for pairs in tokens:\n token = pairs[0].translate(self.translator).lower() # remove punctuation symbols, lowercasing\n if token: # filter out those empty token\n tokens_modified.append((self.stemmer.stem(token), pairs[1])) # stemming\n tokens_modified = list(set(tokens_modified)) # delete those repeat pairs\n return tokens_modified", "def tokenize_lemmatize(column, min_word_len=2):\n nlp = spacy.load('en', disable=['tagger', 'parser', 'ner'])\n docs = column.tolist()\n\n def token_filter(token):\n return not (token.is_punct | token.is_space) and (len(token.text) >= min_word_len)\n\n filtered_tokens = []\n for doc in nlp.pipe(docs):\n tokens = [token.lemma_ for token in doc if token_filter(token)]\n filtered_tokens.append(tokens)\n\n return filtered_tokens", "def processwords(list_of_matches, lemmatag = False):\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... 
should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches", "def normalise(word):\n\tword = word.lower()\n\t# word = stemmer.stem_word(word) #if we consider stemmer then results comes with stemmed word, but in this case word will not match with comment\n\tword = lemmatizer.lemmatize(word)\n\treturn word", "def tokenize(text):\n \n tokens = word_tokenize(text)\n \n STOPWORDS = list(set(stopwords.words('english')))\n # remove short words\n tokens = [token for token in tokens if len(token) > 2]\n # remove stopwords\n tokens = [token for token in tokens if token not in STOPWORDS]\n \n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def det_lemma(word):\n if word == (\"an\"):\n return (\"a\")\n else:\n return word.lower()", "def _proc(dat):\n def lemma(text):\n lemmatizer = WordNetLemmatizer()\n w_tokenizer = WhitespaceTokenizer()\n return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]\n\n dat['text_lemmatized'] = dat['clean_comments'].apply(lemma)\n dat['text_lemmatized'] = dat['text_lemmatized'].apply(' '.join)", "def lemmatize_corpus(text_corpus, v):\n\tif (v):\n\t\tprint(\"lemmatizing corpus...\")\n\n\tlemma_corpus = []\n\tfor i in range(0, len(text_corpus)):\n\n\t\t#tokenize and tag\n\t\tpara = text_corpus[i]\n\t\tpara_token = word_tokenize(para)\n\t\tpara_tagged = 
pos_tag(para_token)\n\t\tpara_lemma = []\n\n\t\t#lemmatize\n\t\twnl = WordNetLemmatizer()\n\t\tfor j in range(0, len(para_tagged)):\n\t\t\tpara_tagged[j] = (para_tagged[j][0],get_wordnet_pos(para_tagged[j][1]))\n\t\t\tword_lemma = wnl.lemmatize(para_tagged[j][0], para_tagged[j][1])\n\t\t\tpara_lemma.append(word_lemma)\n\n\t\t#return to str format\n\t\tpara_lemma_str = \" \".join(para_lemma)\n\t\tlemma_corpus.append(para_lemma_str)\n\n\treturn lemma_corpus", "def preprocess(docs):\r\n # stop = set(stopwords.words('english'))\r\n tags = {'NN', 'NNS', 'NNP', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}\r\n for i in range(len(docs)):\r\n docs[i] = [(word.lower(), convert(tag)) for (word, tag) in nltk.pos_tag(nltk.word_tokenize(docs[i])) if tag in tags]\r\n return lemmatize_docs(docs)", "def sents_2(list_of_tweets):\n stopwords = nltk.corpus.stopwords.words('english')\n contextTerms = []\n for sent in list_of_tweets:\n for word in sent.split():\n word_lemmatizer = WordNetLemmatizer()\n word = word_lemmatizer.lemmatize(word.lower())\n if wordnet.synsets(word) and word not in stopwords and len(word)>2:\n contextTerms.append(word)\n\n #print( contextTerms)\n return contextTerms", "def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return text\n return text", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == 
(\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def removestopwords(query):\n wordlist = [word for word in query.split() if word not in stopwords.words('english')]\n return \" \".join(wordlist)", "def process_text(text):\n doc = spacy_model(text.lower())\n result = []\n for token in doc:\n if token.text in spacy_model.Defaults.stop_words:\n continue\n if token.is_punct:\n continue\n if token.lemma_ == '-PRON-':\n continue\n result.append(token.lemma_)\n return \" \".join(result)", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def remove_stopwords(ingredient, stopwords):\n ingredient = ingredient.lower() # normalizes to lower case\n no_stops = [gram for gram in ingredient.split(\" \") if gram not in stopwords]\n new_ingredient = \" \".join(no_stops)\n return new_ingredient", "def inverse_transform_lemmas(self, predictions):\n pred_lemmas = []\n if self.include_lemma == 'generate':\n for pred in predictions:\n pred_lem = ''\n for positions in pred:\n top_idx = np.argmax(positions) # winning position\n c = self.lemma_char_idx[top_idx] # look up corresponding char\n if c in ('$', '%'):\n continue\n if c == '|':\n break\n else:\n pred_lem += c # add character\n pred_lemmas.append(pred_lem)\n\n elif self.include_lemma == 'label':\n predictions = np.argmax(predictions, axis=1)\n pred_lemmas = self.lemma_encoder.inverse_transform(predictions) \n \n return pred_lemmas", "def searchphrases(query):\n query_nostopwords = removestopwords(query)\n query_lemmatized = lemmatize(query_nostopwords) #look like\n phraseids = []\n ngramids=[]\n words=query_lemmatized.split()\n query_ngram = \"select id from ngrams where lower(lemmangrams) like lower('%{}%')\".format(query_lemmatized)+\" or lower(lemmangrams) like lower('%{}%')\".format(words[0])\n for word in words[1:]:\n query_ngram=query_ngram+\" or lower(lemmangrams) like lower('%{}%')\".format(word)\n con = it.engine.execute(query_ngram)\n rows_phrase = con.fetchall()\n if rows_phrase:\n ngramids = list(set([str(i[0]) for i in rows_phrase]))\n phraseids.extend(ngramids)\n phraseids = list(set(phraseids))\n results=categorize(phraseids)\n return results", "def lemmatize_tagged_tokens(tagged_tokens, wn):\n return [\n lemmatize(wn.value, token, pos=pos).lower()\n for token, pos in tagged_tokens\n ]" ]
[ "0.73897773", "0.73393244", "0.72585356", "0.7244815", "0.71327686", "0.69146174", "0.68672264", "0.68209785", "0.6816726", "0.6812039", "0.6789802", "0.6779173", "0.67476636", "0.6746864", "0.6731405", "0.6731262", "0.67183787", "0.6701651", "0.66856176", "0.66856176", "0.66856176", "0.66856176", "0.66856176", "0.66856176", "0.6663114", "0.66400886", "0.66274816", "0.66251475", "0.66219306", "0.6618308", "0.6593374", "0.65826494", "0.65747845", "0.6568638", "0.6551097", "0.6515441", "0.64806426", "0.6476722", "0.6475141", "0.6451669", "0.6410553", "0.6402371", "0.63973045", "0.6394101", "0.6387726", "0.63610667", "0.6339907", "0.6318728", "0.63122797", "0.62913316", "0.6266331", "0.62603414", "0.62435156", "0.6232152", "0.6225483", "0.62235564", "0.6202677", "0.6200604", "0.6168451", "0.6161519", "0.6160777", "0.61538434", "0.6147658", "0.61413294", "0.6137634", "0.6129949", "0.61283517", "0.6126499", "0.6124726", "0.6116443", "0.6100633", "0.60881114", "0.60877603", "0.60768694", "0.60682863", "0.6041939", "0.60368615", "0.60274124", "0.6025369", "0.6024836", "0.6011482", "0.60085607", "0.60061926", "0.59878445", "0.598583", "0.5978802", "0.5944892", "0.5936205", "0.5935745", "0.5929702", "0.59290296", "0.5925957", "0.59144294", "0.5912611", "0.59100163", "0.59086746", "0.59080905", "0.5905971", "0.59047323", "0.59039164" ]
0.8088282
0
downloading a CSV and renaming its columns, replacing spaces with '_'
def download_datasets_csv(url):
    # dataset = pd.read_csv(url, sep='\t')
    dataset = pd.read_csv(url, sep=",")
    dataset.columns = dataset.columns.str.replace(" ", "_")
    return dataset
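The same rename idiom in a self-contained form, applied to a small in-memory CSV so the effect of str.replace(" ", "_") on the header row is visible; the sample data below is assumed for illustration and is not taken from the record.

import pandas as pd
from io import StringIO

# Hypothetical CSV content standing in for a downloaded file (assumption, not from the record).
csv_text = "first name,last name,postal code\nAda,Lovelace,12345\n"
df = pd.read_csv(StringIO(csv_text), sep=",")
df.columns = df.columns.str.replace(" ", "_")
print(list(df.columns))  # ['first_name', 'last_name', 'postal_code']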
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tidy_cols(my_csv):\n return [re.sub(\" \", \"_\", col.lower()) for col in my_csv.columns]", "def _clean_up_table_column_names(loop_dict):\n \n # Make the column names all lowercase\n # and remove any underscores from the beginning\n for key in loop_dict.keys():\n rename_dict = { x:re.sub(r\"\"\"^_\"\"\", '', x.lower()) for x in loop_dict[key].columns }\n loop_dict[key].rename(columns=rename_dict, inplace=True)\n \n return loop_dict", "def change_column_names(filename, old_colnames, new_colnames):\n\tos.system('mv %s %s.copy' % (filename, filename))\n\tc = pyfits.open(filename+\".copy\")\n\ttbhdu = c[1]\n\tncol = len(tbhdu.data.columns)\n\tnewcols = []\n\tfor i in range(ncol):\n\t\tcolname = tbhdu.data.columns[i].name\n\t\tcolfmt = tbhdu.data.formats[i]\n\t\tcolarr = tbhdu.data.field(colname)\n\t\tfor j in range(len(old_colnames)):\n\t\t\tif tbhdu.data.columns[i].name == old_colnames[j]:\n\t\t\t\tcolname = new_colnames[j]\n\t\t\t\tbreak\n\t\t\t\t#print colname\n\t\tnewcols += [pyfits.Column(name=colname, format=colfmt, array=colarr)]\n\tnewcols = pyfits.ColDefs(newcols)\n\t#print newcols\n\tnewhdu = pyfits.new_table(newcols)\n\tnewhdu.writeto(filename)\n\tc.close()\n\tos.system('rm %s.copy' % filename)", "def rename_id_col(df: pd.DataFrame):\r\n for col in df.columns:\r\n if \"id\" in col:\r\n df.rename(columns={col: col.replace(\"-\", \"_\")}, inplace=True)\r\n return df", "def remove_spaces_from_columns_names(file_path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n path_obj = Path(file_path)\n df = get_df_from_data_file(file_path)\n df.columns = df.columns.str.strip()\n delete_data_file(file_path)\n if path_obj.suffix == \".xlsx\":\n df.to_excel(path_obj.as_posix(), index=False)\n elif path_obj.suffix == \".csv\":\n df.to_csv(path_obj.as_posix(), index=False, sep=',')\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def _slugify_columns(column_names):\n\n encoded_names = []\n\n for column_name in column_names:\n slug = RE_ENCODED_COLUMN.sub('_', column_name).lower()\n slug = make_unique(slug, encoded_names + Parser.reserved_words)\n encoded_names.append(slug)\n\n return encoded_names", "def format_cols(data):\n col_list = []\n for elem in data:\n new_str = \"\"\n for string in elem.split('_'):\n new_str += string.capitalize() + \" \"\n col_list.append(new_str[:-1])\n return col_list", "def replace_underscore(filename):\n name = get_name_from_filename(filename)\n ext = get_extension_from_filename(filename)\n old, new = '_', ' '\n\n new_name = name.replace(old, new)\n\n return new_name + ext", "def _column_original_name(name):\n if ':' in name:\n return name.split(':')[-1]\n else:\n return name", "def trim_long_colnames(cat):\n import re\n long_short_pairs = [\n ('GeneralShapeletPsf', 'GSPsf'),\n ('DoubleShapelet', 'DS'),\n ('noSecondDerivative', 'NoSecDer')]\n for long, short in long_short_pairs:\n long_re = re.compile(long)\n for col_name in cat.colnames:\n if long_re.search(col_name):\n new_col_name = long_re.sub(short, col_name)\n cat.rename_column(col_name, new_col_name)", "def normalize_col_name(col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if is_relation:\n if new_name.endswith('_id'):\n new_name = new_name[:-3]\n else:\n field_params['db_column'] = col_name\n\n new_name, num_repl = re.subn(r'\\W', '_', 
new_name)\n if num_repl > 0:\n field_notes.append('Field renamed to remove unsuitable characters.')\n\n if new_name.find(LOOKUP_SEP) >= 0:\n while new_name.find(LOOKUP_SEP) >= 0:\n new_name = new_name.replace(LOOKUP_SEP, '_')\n if col_name.lower().find(LOOKUP_SEP) >= 0:\n # Only add the comment if the double underscore was in the original\n # name\n field_notes.append(\n \"Field renamed because it contained more than one '_' in a row.\"\n )\n\n if new_name.startswith('_'):\n new_name = 'field%s' % new_name\n field_notes.append(\"Field renamed because it started with '_'.\")\n\n if new_name.endswith('_'):\n new_name = '%sfield' % new_name\n field_notes.append(\"Field renamed because it ended with '_'.\")\n\n if keyword.iskeyword(new_name):\n new_name += '_field'\n field_notes.append(\n 'Field renamed because it was a Python reserved word.')\n\n if new_name[0].isdigit():\n new_name = 'number_%s' % new_name\n field_notes.append(\n \"Field renamed because it wasn't a valid Python identifier.\")\n\n if new_name in used_column_names:\n num = 0\n while '%s_%d' % (new_name, num) in used_column_names:\n num += 1\n new_name = '%s_%d' % (new_name, num)\n field_notes.append('Field renamed because of name conflict.')\n\n if col_name != new_name and field_notes:\n field_params['db_column'] = col_name\n\n return new_name, field_params, field_notes", "def _remap_column_names(self, frame):\n\n frame[TransactionColumns.BANK.name] = self.INSTITUTION\n frame[TransactionColumns.ACCOUNT.name] = self.account\n frame.rename(columns=self._FIELD_2_TRANSACTION, inplace=True)\n frame[TransactionColumns.CHECK_NO.name] = None\n return frame", "def convert_header_names(word: str, delim='-') -> str:\n return delim.join(x.capitalize() or '_' for x in word.split('_'))", "def rename_columns() -> list:\n columns_name = ['ICAO_empresa_aerea', 'numero_voo', 'codigo_DI', 'codigo_tipo_linha',\n 'ICAO_aerodromo_partida', 'ICAO_aerodromo_destino', 'partida_prevista',\n 'partida_real', 'chegada_prevista', 'chegada_real', 'situacao_voo',\n 'codigo_justificativa', 'month', 'year']\n\n return columns_name", "def short_column(name : str) -> str:\n return name.split(\"-\")[1]", "def _get_col_rename(df, dftype):\n \n # Build a dictionary of column renamings for use in pandas rename function\n renamed_columns = {}\n column_names = list(df.columns)\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n renamed_columns[column_names[i]] = lower_columns[i]\n\n if dftype == 'csv':\n # build csv rename dictionary\n renamed_columns['museumcatno'] = 'museumcatnumber'\n renamed_columns['huc8number'] = 'huc8'\n elif dftype == 'api':\n # build api rename dictionary\n renamed_columns['key'] = 'specimennumber'\n renamed_columns['decimallatitude'] = 'latitude'\n renamed_columns['decimallongitude'] = 'longitude'\n renamed_columns['latlongsource'] = 'source'\n renamed_columns['latlongaccuracy'] = 'accuracy'\n else:\n raise ValueError(f\"Dataframe type '{dftype}' invalid - Accepted inputs are 'csv' or 'api'\")\n\n return renamed_columns", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def _rename_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"RenameColumnsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.RenameColumnsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n if isinstance(res, 
pl.LazyFrame):\n # work around https://github.com/pola-rs/polars/issues/5882#issue-1507040380\n res = res.collect()\n res = res.rename(op.reverse_mapping)\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res", "def substitute_names(df):\n\n masking_tag = '_sql'\n duplicated_names = ['SwitchName', 'Fabric_Name', 'SwitchMode', 'Memory_Usage', 'Flash_Usage', 'Speed']\n replace_dct = {orig_name + masking_tag: orig_name for orig_name in duplicated_names}\n df.rename(columns=replace_dct, inplace=True)", "def smooth_columns(input_frame):\n column_labels = list(input_frame.columns)\n input_frame.columns = [c.lower().replace('_','') for c in column_labels]\n return input_frame", "def fix_filename(s):\n t = s.translate(BAD_FILETABLE)\n if t.count('.') > 1:\n for i in range(t.count('.') - 1):\n idot = t.find('.')\n t = \"%s_%s\" % (t[:idot], t[idot+1:])\n return t", "def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]", "def city_rename(df, target=None):\n if not target:\n target = ['city']\n for col in target:\n df[col] = df[col].apply(\n lambda text: col + '-' + str(text).replace(' ', '_'))\n return None", "def rename_cyano_columns(df): \n cols = list(df.columns)\n for i, col in enumerate(df.columns):\n if col.lower().find(\"pro\") != -1 and col.lower().find(\"abun\") != -1: # prochlorococcus abundance\n cols[i] = PROC\n elif col.lower().find(\"syn\") != -1 and col.lower().find(\"abun\") != -1: # synechococcus abundance\n cols[i] = SYNC\n elif col.lower().find(\"pico\") != -1 and col.lower().find(\"abun\") != -1: # picoeukaryote abundance\n cols[i] = PICO\n df.columns = cols \n return df.columns", "def remove_spaces(filenames):\n for filename in filenames:\n if \" \" in filename:\n new_name = filename.replace(\" \", \"_\")\n print \"replacing\", filename, \"with\", new_name\n os.rename(filename, new_name)", "def change_to_video_name(csv_name, suffix):\n return csv_name[:-10]+\".\"+suffix", "def cleanup_column_names(df, rename_dict={}, do_inplace=True):\n if not rename_dict:\n return df.rename(columns={col: col.lower().replace(' ', '_')\n for col in df.columns.values.tolist()},\n inplace=do_inplace)\n else:\n return df.rename(columns=rename_dict, inplace=do_inplace)", "def rename_columns(dataframe,new_prefix='pca_',old_colomn_starting_index=2,new_column_starting_index=1):\n old_column_index = old_colomn_starting_index\n new_column_index = new_column_starting_index\n for i in range(0,n_comp):\n if column_name:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),column_name+'_'+new_prefix+str(new_column_starting_index))\n else:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),new_prefix+str(new_column_starting_index))\n old_colomn_starting_index+=1\n new_column_starting_index+=1\n return dataframe", "def underscore(text):\n # return '__'+text.replace(' ','__') + '__'\n return text.replace(' ', '__')", "def test_multicolumn_factorize_columns_suffix_change():\n df = pd.DataFrame(\n {\n \"a\": [\"hello\", \"hello\", \"sup\"],\n \"b\": [1, 2, 3],\n \"c\": [\"aloha\", \"nihao\", \"nihao\"],\n }\n ).factorize_columns(column_names=[\"a\", \"c\"], suffix=\"_col\")\n assert \"a_col\" in df.columns\n assert \"c_col\" in df.columns\n assert \"a_enc\" not in df.columns\n assert \"c_enc\" not in df.columns", "def 
safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)", "def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name", "def download_files_from_minio(bucket_name, file_prefix, ls_files_to_download):\n minio_client = get_minio_client()\n ls_minio_files = list(minio_client.list_objects(bucket_name, file_prefix))\n ls_downloaded_files = []\n local_object_dir = tempfile.mkdtemp()\n for minio_object_path in ls_files_to_download:\n local_object_name = minio_object_path.replace(file_prefix, \"\")\n local_object_path = os.path.join(local_object_dir,local_object_name) \n get_file_fromMinio(bucket_name, minio_object_path, local_object_path)\n ls_downloaded_files.append(local_object_path)\n return ls_downloaded_files\n\n\n def process_columns(string_to_process, replace_string='_'):\n \"\"\"\n Process a column name by :\n - processing special aplphabetical characters\n - replacing special characters by value set in 'replace_string' \n #Parameters\n :param string_to_process (str): String to process (column name)\n :param replace_string (str, default : '_'): Value to set for replaced characters\n (ex : \"/\" -> \"\", where \"\" is the replace_string)\n #Return\n return out (str): Processed string\n \"\"\"\n BIGQUERY_COLUMN_NAME_RE = \"[^0-9a-zA-Z_]+\"\n # Get encoding of column\n str_bytes = str.encode(string_to_process)\n encoding = 'utf-8'\n # Process any special alphabetical character\n temp_out = unicodedata\\\n .normalize('NFKD', string_to_process)\\\n .encode('ASCII', 'ignore')\\\n .decode(encoding)\n \n out = re.sub(BIGQUERY_COLUMN_NAME_RE, replace_string, temp_out)\n out = out.lower()\n return out", "def cleaning_data():\n\n data.drop([\"Unnamed: 0\"], axis = 1, inplace = True)\n data.columns = map(str.upper, data.columns)\n return data", "def get_data_name(name):\n if name.find('Data') == 0:\n name = name[4:]\n name_ = ''\n for i, char in enumerate(name):\n if char.isupper() and i > 0:\n name_ += '_'\n name_ += char.lower()\n return name_", "def _rename_cols(df, prefix):\n df.columns = [\n ColNameFormatter.fmt(col_name)\n if col_name in NON_DUPLICATE_COLS\n else '{}{}'.format(prefix, col_name)\n for col_name in df.columns.values\n ]", "def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )", "def update_column_title():\n\n column_id = request.get_json()['column_id']\n column_title = request.get_json()['column_title']\n\n return sql_manager.update_column_title(column_id, column_title)", "def add_underscore(houses:pd.DataFrame) -> pd.DataFrame:\n houses['PROPERTY_TYPE']= houses[PROPERTY_TYPE]\n houses['ZIP_CODE']= houses[ZIP_CODE]\n houses['SQFT']= houses[SQUARE_FEET]\n houses['YEAR_BUILT']= houses[YEAR_BUILT]\n houses['DAYS_ON_MARKET']= houses[DAYS_ON_MARKET]\n houses['SQFT_PER']= houses[SQFT_PER]\n houses['HOA']= houses[HOA]\n drop_columns= [PROPERTY_TYPE, ZIP_CODE, SQUARE_FEET, YEAR_BUILT, \n DAYS_ON_MARKET, SQFT_PER, HOA]\n return houses.drop(drop_columns, axis= 1)", "def simpleColumnNames():\n global masterdf\n\n df = masterdf.copy()\n #df = df[:int(len(df)*percentdata*0.01)]\n # new collumn names otherwise create_indicators break\n # [OPEN-HIGH-LOW-CLOSE-TICKVOL-VOL]\n # O-H-L-C-T-V-S colum suffixes\n newnames = [ 
symbols[i]+'_'+masterdf.columns[j][0]\n for i in range(len(symbols)) for j in range(7) ]\n df.columns = newnames\n\n return df", "def proper_names(city, month, day):\n city = str.title(os.path.splitext(city)[0])\n city = str.replace(city, '_', ' ')\n month = str.title(month)\n day = str.title(day)\n return city, month, day", "def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"", "def build_messy_lookup(source,dest,ref_col):\n la = QuickGrid().open(source)\n od = QuickGrid().open(join(\"source_files\",\"local_authority_data_names.csv\"))\n\n lookup = QuickGrid()\n lookup.header = [\"la name\",ref_col]\n\n possible = [\"official-name\",\"alt-name-1\",\"alt-name-2\",\"alt-name-3\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n lookup.add([r[p],r[ref_col]])\n \n current_names = [x[0] for x in lookup]\n\n for r in od:\n if r[\"name\"] not in current_names:\n code = r[\"local-authority\"].split(\":\")[1]\n lookup.add([r[\"name\"],code])\n \n lookup.save(dest,force_unicode=True)", "def prefix_columns(cat, filt, fields_to_skip=()):\n old_colnames = list(cat.columns)\n for field in fields_to_skip:\n field_idx = old_colnames.index(field)\n old_colnames.pop(field_idx)\n\n transformation = {col: '%s_%s' % (filt, col) for col in old_colnames}\n cat.rename(index=str, columns=transformation, inplace=True)", "def rename_colnames(colnames):\n\n # Keys need to be lower case!\n lookup = {\"date\" : \"datumsec\",\n \"ddd\" : \"dd\",\n \"ffkmh\" : \"ff\",\n \"gustkmh\" : \"ffx\",\n \"p0hpa\" : \"psta\",\n \"pseahpa\" : \"pmsl\",\n \"ptnd\" : \"ptend\",\n \"nh\" : \"nh\",\n \"nt\" : \"nt\",\n \"n\" : \"n\",\n \"inso\" : \"sunday\",\n \"vis\" : \"vv\"}\n\n # Rename\n colnames = [x.lower() for x in colnames]\n import re\n for i in range(0, len(colnames)):\n for k in lookup.keys():\n if re.match(\"^{:s}$\".format(colnames[i].lower()), k):\n colnames[i] = lookup[k]\n\n return colnames", "def renamefile(filename):\n new_data_list = []\n with open(filename, 'r') as f:\n data_list = f.read().split('\\n')\n\n print('Generating new data list..')\n for data in tqdm(data_list):\n if len(data) == 0:\n continue\n data_info = data.split(' ')\n\n #data_info[0] = data_info[0].replace('jpg', 'png')\n #data_info[1] = data_info[1].replace('jpg', 'png')\n for it, name in enumerate(data_info):\n data_info[it] = '/'.join(name.split('/')[1:])\n if data_info[2].find('extras') == -1:\n new_data_list.append(' '.join(data_info))\n\n with open(filename, 'w') as f:\n print('writing new data names..')\n\n for it, data in tqdm(enumerate(new_data_list)):\n if len(data) == 0:\n continue\n\n if it == len(new_data_list)-1:\n f.write(data)\n else:\n f.write(data+'\\n')\n\n print('Done.')", "def clean_table_name(table_name):\n path_underscore = table_name.translate(table_name.maketrans(\"-. 
\", \"___\"))\n return \"_\".join(filter(None, path_underscore.split(\"_\")))", "def rename_fields(all_data):\n\tfield_map = load_json('field_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\tfor old_field in all_data.keys():\n\t\ttmp_vals = pd.Series(all_data[old_field].values, index=all_data.index)\n\t\tall_data = all_data.drop(old_field, 1)\n\t\tif old_field in field_map:\n\t\t\tnew_field = field_map[old_field]\n\t\t\tall_data[new_field] = tmp_vals\n\treturn all_data", "def parse_column_names(df):\n cols = set(df.columns.tolist())\n if \"StreamID\" in cols:\n df.rename(columns={\"StreamID\": \"stream_id\"}, inplace=True)\n if \"TimesViewed\" in cols:\n df.rename(columns={\"TimesViewed\": \"times_viewed\"}, inplace=True)\n if \"total_price\" in cols:\n df.rename(columns={\"total_price\": \"price\"}, inplace=True)\n\n return df", "def change_image_name(df, column):\n return [i + '.jpeg' for i in df[column]]", "def _refactor_time_columns(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _refactor_time_columns')\n write_cursor.execute('ALTER TABLE timed_balances RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE timed_location_data RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE trades RENAME COLUMN time TO timestamp')\n write_cursor.execute('ALTER TABLE asset_movements RENAME COLUMN time TO timestamp')\n log.debug('Exit _refactor_time_columns')", "def namingConvention(columnName):\n words = columnName.lower().split(\"_\")\n\n def cap(word):\n if word.lower() == \"id\":\n return word.upper()\n else:\n return word.capitalize()\n\n return words[0] + \"\".join(map(cap, words[1:]))", "def __clean_column_names(self, columns):\r\n cols = []\r\n for column in columns:\r\n cols.append(column.replace('\"', ''))\r\n return cols", "def filename_format(keyword):\r\n tmp = re.sub('[^0-9a-zA-Z]', '_', keyword).strip('_')\r\n return re.sub('_+', '_', tmp).lower()", "def exported_name(fullname: str) -> str:\n # TODO: Support unicode\n return fullname.replace('___', '___3_').replace('.', '___')", "def name_cleaning(df):\n # Custom cleaning\n df.columns = [re.sub(\"[\\\\. 
\\\\(\\\\)\\\\/]+\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"-\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"'\", \"\", elem) for elem in df.columns]\n df.columns = [re.sub(\",\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\":\", \"_\", elem) for elem in df.columns]\n df.columns = [re.sub(\"<\", \"MIN\", elem) for elem in df.columns]\n df.columns = [re.sub(\">\", \"MAG\", elem) for elem in df.columns]\n df.columns = [re.sub(\"&\", \"E\", elem) for elem in df.columns]\n df.columns = [re.sub(\"°\", \"\", elem) for elem in df.columns]\n df.columns = [re.sub(\"%\", \"PERC\", elem) for elem in df.columns]\n df.columns = [re.sub(\"\\\\+\", \"_\", elem) for elem in df.columns]\n # String upper\n df.columns = [elem.upper() for elem in df.columns]\n # Trim\n df.columns = [elem.strip() for elem in df.columns]\n # Cut recurring underscore\n df.columns = [re.sub(\"_+\", \"_\", elem) for elem in df.columns]\n return(df)", "def rename_columns(self):\n self.data.rename(columns={\n 'DealerID': 'd_id',\n 'DealerName': 'd_name',\n 'Type': 'stock_type', # Needs capitalization\n 'Stock': 'stock_id',\n 'VIN': 'vin',\n 'Year': 'year',\n 'Make': 'make',\n 'Model': 'model',\n 'Body': 'trim', # Needs parsing\n 'Trim': 'body_style',\n 'Doors': 'doors',\n 'ExtColor': 'exterior_colour',\n 'IntColor': 'interior_colour',\n 'EngCylinders': 'cylinders',\n 'EngDisplacement': 'displacement', # Needs parsing\n 'Transmission': 'transmission_description', # Needs parsing and split\n 'Odometer': 'odometer',\n 'Price': 'price',\n 'MSRP': 'msrp',\n 'Description': 'description',\n 'EngType': 'configuration',\n 'EngFuel': 'fuel_type',\n 'Drivetrain': 'drivetrain',\n 'ExtColorGeneric': 'exterior_colour_generic', # Needs parsing\n 'IntColorGeneric': 'interior_colour_generic', # Needs parsing\n 'PassengerCount': 'passengers'\n }, inplace=True)\n\n return None", "def replace_english_names(df, column_name):\n parent_dir = pkg_resources.resource_filename(\"linkage.model.to_replace\", \"\")\n file_name = 'english_names_german_cities.json'\n\n column = df[column_name]\n\n # Replace English names with their German equivalents\n column = replace_with_json(column, parent_dir, file_name)\n\n # Update the dataframe\n df.loc[:, column_name] = column", "def test_duplicated_column_names(suffix: str) -> None:\n path = rsc / duplicated_column_names_file\n df = read_ods(path.with_suffix(suffix), 1)\n\n assert isinstance(df, pd.DataFrame)\n assert len(df.columns) == 4\n assert \"website.1\" in df.columns", "def parse_columns(self):\n self.data['ID'], self.data['SSSSSSSS.mmmuuun'] = self.data['ID SSSSSSSS.mmmuuun'].str.split(' ', 1).str\n self.data['SSSSSSSS.mmmuuun'] = self.data['SSSSSSSS.mmmuuun'].astype(str).str.strip()", "def crudo_rename(fecha):\n os.rename(\"/var/lib/reportes-zabbix/crudos/Merged-Trends-{}.pickle\".format(datetime.today().date()),\"/var/lib/reportes-zabbix/crudos/Merged-Trends-{}.pickle\".format(fecha))\n os.rename(\"/var/lib/reportes-zabbix/crudos/Merged-Trends-{}_ONT.pickle\".format(datetime.today().date()),\"/var/lib/reportes-zabbix/crudos/Merged-Trends-{}_ONT.pickle\".format(fecha))", "def change_col_prefix(df, old_prefix, new_prefix ):\n op_regex = old_prefix + '.+'\n op_cols = list(df.filter(regex=op_regex).columns)\n np_cols = [col.replace(old_prefix,new_prefix) for col in op_cols]\n rename_map = {x[0]:x[1] for x in zip(op_cols, np_cols)}\n return df.rename(columns=rename_map)", "def rename(self, newname):\n # set the new column name\n self.colname = newname", "def 
fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]", "def create_charm_name_from_importable(charm_name):\n # _ is invalid in charm names, so we know it's intended to be '-'\n return charm_name.replace(\"_\", \"-\")", "def _generate_expanded_column_names(self):\n\n names = []\n # Get names of the descriptors\n des_names = [column for column in self.descriptor_dataframe][1:]\n\n # Generate expanded descriptor names for each compound\n for i in range(self.total_compounds):\n for des_name in des_names:\n name = 'compund_{}_{}'.format(i, des_name)\n names.append(name)\n\n return names", "def FixColumnLabels(cv):\n l = []\n for label in cv[0].columns:\n if \"-\" not in label and label != \"Elapsed\":\n l.append(label + \"-UT\")\n if \"-\" in label or label == \"Elapsed\":\n l.append(label)\n\n for d in cv:\n d.columns = l\n\n return cv", "def normalise_key(self, key):\n key = key.replace('-', '_')\n if key.startswith(\"noy_\"):\n key = key[4:]\n return key", "def get_reverse_column(column):\n for suffix, other in [('_1', '_2'), ('_2', '_1')]:\n if column.endswith(suffix):\n return column[:-len(suffix)] + other\n return column", "def task_1_fix_names_start_letter(data: DT) -> DT:\n for dic in data:\n if dic.get('name'):\n dic['name'] = dic['name'].capitalize()\n return data", "def update_column_title(col):\n col_type = self.features_bucket_mapping_.get(col).type\n return [f\"Feature '{col}'\"], [col_type]", "def get_fixed_filename(filename):\n initial_changed_name = filename.replace(\" \", \"_\").replace(\".TXT\", \".txt\")\n previous_character = \"\"\n new_name = \"\"\n for i, character in enumerate(initial_changed_name):\n current_character = character\n if previous_character == \"_\" and current_character.islower():\n new_name += current_character.upper()\n elif previous_character == \"(\" and current_character.islower():\n new_name += current_character.upper()\n elif previous_character.islower() and current_character.isupper():\n new_name += \"_{}\".format(current_character)\n else:\n new_name += current_character\n previous_character = character\n return new_name", "def selectnamesD(data):\n col = []\n for name in list(data):\n if name.startswith('hyperactive_'):\n col.append(name)\n else:\n col = col\n \n return col", "def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df", "def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'): \n name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections\n 'Alisson':'Alisson Ramses Becker',\n 'Allan':'Allan Marques Loureiro',\n 'André Gomes':'André Filipe Tavares Gomes',\n 'Angelino':'José Ángel Esmorís Tasende',\n 'Bernard':'Bernard Anício Caldeira Duarte', # 
Everton\n 'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City\n 'Bernardo':'Bernardo Fernandes da Silva Junior', # \n 'Borja Bastón':'Borja González Tomás',\n 'Chicharito':'Javier Hernández Balcázar',\n 'David Luiz':'David Luiz Moreira Marinho', \n 'Ederson':'Ederson Santana de Moraes',\n 'Emerson':'Emerson Palmieri dos Santos',\n 'Fabinho':'Fabio Henrique Tavares',\n 'Felipe Anderson':'Felipe Anderson Pereira Gomes',\n 'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United\n 'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds\n 'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea\n 'Jonny':'Jonathan Castro Otto', # Wolves\n 'Jorginho':'Jorge Luiz Frello Filho', # Chelsea\n 'Jota':'José Ignacio Peleteiro Romallo',\n 'Kepa':'Kepa Arrizabalaga',\n 'Kiko Femenía':'Francisco Femenía Far',\n 'Lucas Moura':'Lucas Rodrigues Moura da Silva',\n 'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea\n 'Raphinha':'Raphael Dias Belloli',\n 'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',\n 'Rodri':'Rodrigo Hernandez',\n 'Rúben Dias':'Rúben Santos Gato Alves Dias',\n 'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',\n 'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',\n 'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa\n 'Wesley':'Wesley Moraes',\n 'Willian':'Willian Borges Da Silva',\n }\n understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)\n manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],\n right_on=['player_name', 'date'], how=join) # Merge using player name and date of game\n return manual_merge", "def fix_turnstile_data(filenames):\n for name in filenames:\n f_in = open(name, 'r')\n f_out = open('updated_' + name, 'w')\n reader_in = csv.reader(f_in, delimiter=',')\n writer_out = csv.writer(f_out, delimiter=',')\n\n for line in reader_in:\n part_1 = line[0]\n part_2 = line[1]\n part_3 = line[2]\n for i in range(3, len(line), 5):\n writer_out.writerow(\n [part_1, part_2, part_3, line[i], line[i + 1], line[i + 2], line[i + 3], line[i + 4]])\n f_in.close()\n f_out.close()", "def get_col_names(fname):\n with open(fname) as f:\n cols = f.readline().strip(\"#\\n\").lower()\n cols = (re.sub(r'\\(\\d+\\)', '', cols)\n .replace('/', '_to_')\n .split())\n return cols", "def rename_columns(self, col):\n try:\n self.cleaned_data.columns = col\n except Exception as e:\n raise e", "def fix_fasta(database_names):\n for file in database_names:\n file_mod = file.replace(\".fasta\", \"_mod.fasta\")\n with open(file, 'r') as f:\n lines = f.readlines()\n new_lines = []\n for line in lines:\n if '|' in line and \">\" not in line:\n # we replace spaces in header line with \"__\"\n # so I can manipulate that later as biopython doesn't\n # like \"__\"\n new_line = \">\"+line.replace(\" \", \"__\")\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n with open(file_mod, 'w') as f:\n for line in new_lines:\n f.write(line)", "def rename_columns(df, prefix='x'):\n df = df.copy()\n df.columns = [prefix + str(i) for i in df.columns]\n return df", "def create_importable_name(charm_name):\n return charm_name.replace(\"-\", \"_\")", "def wrangle_religion():\n df = pd.read_excel(\"U_S_Religion_Census_2010 _clean_data.xlsx\")\n df.columns = df.columns.str.replace(\" \", \"_\")\n df.columns = df.columns.str.replace(\",\", \"\")\n return df", "def _download_rename(filename):\n url_loc = \"http://www.stsci.edu/~kgordon/beast/\"\n fname_dld = 
download_file(\"%s%s\" % (url_loc, filename))\n extension = filename.split(\".\")[-1]\n fname = \"%s.%s\" % (fname_dld, extension)\n os.rename(fname_dld, fname)\n return fname", "def underscore_and_slash_to_space(data: pd.Series) -> pd.Series:\n return data.replace(r'[\\_/-]', value=' ', regex=True)", "def get_source_fullname(col_name):\n src_dump = get_src_dump()\n info = src_dump.find_one({\"$where\":\"function() {if(this.upload) {for(var index in this.upload.jobs) {if(this.upload.jobs[index].step == \\\"%s\\\") return this;}}}\" % col_name})\n if info:\n name = info[\"_id\"]\n if name != col_name:\n # col_name was a sub-source name\n return \"%s.%s\" % (name,col_name)\n else:\n return name", "def rename_column(self, identifier, column_id, name, datastore):\n # Raise ValueError if given colum name is invalid\n if not is_valid_name(name):\n raise ValueError('invalid column name \\'' + name + '\\'')\n # Get dataset. Raise exception if dataset is unknown\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Rename the column.\n df = dataset.to_dataframe()\n df = vizual.rename_columns(df=df, colids=[column_id], names=[name])\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def run(self, col_name):\n path_to_result_folder = self.inputs[\"path_to_result_folder\"].read()\n path_to_toc = self.inputs[\"pathToToc\"].read()\n path_to_text = os.path.join(path_to_result_folder, \"extract\", \"GOCR\", \"text\", \"fulltext_annotation.txt\")\n dtb = create_dtb(path_to_toc, path_to_text, col_name)\n path_to_dtb_json_file = os.path.join(path_to_result_folder, \"DTB.json\")\n with open(path_to_dtb_json_file, \"w\") as outfile:\n json.dump(dtb, outfile, indent=4)\n self.outputs[\"path_to_dtb_json_file\"].write(path_to_dtb_json_file)", "def test_strip_column_prefix():\n assert strip_column_prefix(\"ts_some_name\") == \"some_name\"\n assert strip_column_prefix(\"some_name\") == \"some_name\"\n # only the prefix may be stripped\n assert strip_column_prefix(\"ts_some_ts_name\") == \"some_ts_name\"\n assert strip_column_prefix(\"some_ts_name\") == \"some_ts_name\"", "def selectnamesC(data):\n col = []\n for name in list(data):\n if name.startswith('headstrong_'):\n col.append(name)\n else:\n col = col\n \n return col", "def test_format_name():\n testname = \"buildup/reference/comsol_solution/v.csv.bz2\"\n result = loader.format_name(testname)\n\n assert \"v.csv\" == result", "def clear_columns(prefixlist,datas,style=0, inplace=False):\n func = {0: str.lower,\n 1: str.upper,\n 2: str.capitalize}\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n# ccc=[c.lower() for c in ccc]\n ccc=[func[style](c) for c in ccc]\n\n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas_renamed=datas.rename(columns=d,inplace=inplace)\n new_datas=datas if inplace else datas_renamed\n\n u, i = np.unique(new_datas.columns, return_index=True)\n y=u[np.argsort(i)]\n\n r=[new_datas.columns.tolist().index(rr)for rr in y]\n\n return new_datas.iloc[:, r]", "def _manage_cols(df, drop_list=[], name_dict={}):\n\n for colname in drop_list:\n if colname not in df:\n raise ValueError(f\"Can't drop column '{colname}' - '{colname}' does not exist in dataframe\")\n for colname in list(name_dict.keys()):\n if colname not in df:\n raise ValueError(f\"Can't rename '{colname}' to 
'{name_dict[colname]}' - '{colname}' does not exist in dataframe\")\n if colname in drop_list:\n raise ValueError(f\"Can't rename '{colname}' to '{name_dict[colname]}' - '{colname}' in drop_list\")\n\n column_names = np.setdiff1d(list(df.columns), list(name_dict.keys()))\n lower_columns = [name.lower().replace(' ','').replace('_','') for name in column_names]\n for i in range(len(column_names)):\n name_dict[column_names[i]] = lower_columns[i]\n \n df = df.drop(drop_list, axis=1)\n df = df.rename(columns=name_dict)\n \n return df", "def clean_headers(df):\n filtered_headers = [header.replace(\"'\",'').replace(' ', '').replace('(', '').replace(')', '').replace('.', '').replace('[', '').replace(']', '') for header in df.columns]\n map_to_new_headers = {}\n for i in range(len(df.columns)):\n map_to_new_headers[df.columns[i]] = filtered_headers[i]\n\n return df.rename(columns = map_to_new_headers)", "def reverse_update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"J\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"J\")[:1000]:\n source.name = (\n f\"ASKAP_{deg2hms(source.wavg_ra, precision=2)}\"\n f\"{deg2dms(source.wavg_dec, precision=2)}\"\n ).replace(\":\", \"\")\n source.save()", "def rename_columns(self, specs):\n import hxl.filters\n return hxl.filters.RenameFilter(self, specs)", "def _with_underscore(cls, key):\n try:\n return key.replace(\"-\", \"_\")\n except AttributeError:\n return key", "def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)", "def clean_names(infile, outfile=DEFAULT_OUTPUT, col=\"Name\", all=False):\n print(\"Processing and exporting, please wait...\")\n\n ROMAN = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X']\n if outfile:\n try:\n of = open(outfile, 'w')\n except:\n outfile = None\n\n with open(infile) as f:\n reader = csv.DictReader(f)\n if outfile:\n writer = csv.DictWriter(of, fieldnames=reader.fieldnames +\n ['uniqid', 'FirstName',\n 'MiddleInitial/Name', 'LastName',\n 'RomanNumeral', 'Title', 'Suffix'])\n writer.writeheader()\n rowid = 0\n allnames = []\n allnameswithid = []\n for r in reader:\n rname = r[col]\n for name in re.split('[&/]', rname):\n name, n = re.subn(r'\\s*\\(.*\\)\\s*', ' ', name)\n if n > 0:\n #print \"Remove Parenthesis...\", name\n pass\n name, n = re.subn(r'\\s*[\\'\"].*[\\\"\\']\\s*', ' ', name)\n if n > 0:\n #print \"Remove Quote...\", name\n pass\n name = HumanName(name)\n if name.last == '':\n a = name.suffix.split(',')\n if len(a) >= 2:\n name = HumanName(name.first + ', ' + a[1] + ' ' + a[0])\n first = name.first.lower()\n mid = name.middle.lower()\n roman = \"\"\n title = name.title\n\n last = \"\"\n suffix_list = []\n for s in name.suffix.split(','):\n if s.strip() in ROMAN:\n roman = s\n last = name.last.lower() + ' ' + roman.strip().lower()\n else:\n suffix_list.append(s)\n if last == \"\":\n last = name.last.lower()\n suffix = ', '.join(suffix_list)\n\n if last == '':\n print(repr(name))\n\n # Fixed ROMAN and Title in Middle\n if mid != \"\":\n m_list = mid.split()\n m = m_list[-1].strip()\n m = m.strip('.')\n if 
len(m_list) > 1 and m.upper() in ROMAN:\n roman = m\n mid = ' '.join(m_list[:-1])\n #print rname, \"==>\", roman, \"==>\", mid\n if m in ['mr', 'ms']:\n title = m\n mid = ' '.join(m_list[:-1])\n #print rname, \"==>\", title, \"==>\", mid\n\n # Adhoc fixed for Title\n if title in ['POPE', \"BARON\", \"MAHDI\"]:\n first = title + ' ' + first\n #print rname, \"==>\", title, \"==>\", first\n title = \"\"\n\n # Standardize Jr/Sr suffix\n suffix = re_std_suffix.sub(r'\\1.', suffix + ' ').strip()\n\n # Standardize Middle Initial\n std_mid = []\n for m in mid.split():\n if len(m) == 1:\n m = m + '.'\n std_mid.append(m)\n mid = ' '.join(std_mid)\n\n if all or (first, mid, last) not in allnames:\n rowid += 1\n r['uniqid'] = rowid\n allnameswithid.append((r['uniqid'], first, mid, last,\n r['seat'].strip()))\n allnames.append((first, mid, last))\n #print \"Add...\", r['uniqid'], first, \"-\", mid, \"-\", last, \"-\", r['seat'].strip()\n s = {'FirstName': first.upper(),\n 'MiddleInitial/Name': mid.upper(),\n 'LastName': name.last,\n 'RomanNumeral': roman.upper(),\n 'Title': title.upper(),\n 'Suffix': suffix.upper()}\n t = dict(r, **s)\n if outfile:\n writer.writerow(t)\n if outfile:\n of.close()\n print(\"Done.\")\n return allnameswithid\n return None", "def rename(self, name, new_name):\n renames = {}\n if new_name in self._data.columns:\n msg = \"Cannot rename '{}' into '{}'. Column name already exists!\"\n raise ValueError(msg.format(name, new_name))\n\n self._in_blacklist(new_name)\n self._check_against_weak_dupes(new_name)\n\n if not self._dimensions_comp == 'ignore':\n self.undimensionize([name] + self.sources(name))\n name = self._dims_free_arr_name(name)\n\n for no, s in enumerate(self.sources(name), start=1):\n if '_' in s and s.split('_')[-1].isdigit():\n new_s_name = '{}_{}'.format(new_name, s.split('_')[-1])\n else:\n new_s_name = '{}_{}'.format(new_name, no)\n self._add_all_renames_to_mapper(renames, s, new_s_name)\n\n self._add_all_renames_to_mapper(renames, name, new_name)\n\n self.rename_from_mapper(renames)\n\n if self._dimensions_comp and not self._dimensions_comp == 'ignore':\n self.dimensionize(new_name)\n\n return None", "def fix_filename(urlTitle):\n fixed = urlTitle.replace('//', '/')\n fixed = fixed.replace('*', 'xXx')\n return fixed" ]
[ "0.5891028", "0.582513", "0.5810645", "0.5772185", "0.5601228", "0.558993", "0.55504906", "0.55083317", "0.547578", "0.5473249", "0.54711485", "0.54481345", "0.5434032", "0.54124486", "0.53992754", "0.5395076", "0.5377607", "0.532118", "0.53113216", "0.52825606", "0.5271922", "0.52561927", "0.52454334", "0.52450347", "0.5241281", "0.5230566", "0.5221803", "0.51867837", "0.51824003", "0.5178063", "0.51714104", "0.5170687", "0.51685977", "0.51685655", "0.5164886", "0.51615614", "0.5157768", "0.5146902", "0.51294744", "0.5128702", "0.51192135", "0.50734484", "0.50718206", "0.50578487", "0.5053509", "0.5042734", "0.5029653", "0.50056666", "0.49960035", "0.49952915", "0.49814335", "0.49719784", "0.49543256", "0.49522758", "0.495075", "0.49470484", "0.4940393", "0.49260265", "0.49179634", "0.49170387", "0.49113908", "0.49057475", "0.4889322", "0.4883483", "0.4876945", "0.48722368", "0.48707154", "0.4866304", "0.48617026", "0.4857362", "0.48560667", "0.48519528", "0.48491263", "0.48448238", "0.48327896", "0.48293856", "0.48275554", "0.48272434", "0.48248047", "0.48211798", "0.4819255", "0.4816754", "0.48108122", "0.48029727", "0.4801241", "0.47977632", "0.47965983", "0.47915927", "0.4786699", "0.47865984", "0.4786276", "0.47758406", "0.47740677", "0.47596717", "0.47578204", "0.47515798", "0.4750755", "0.47500646", "0.4749477", "0.4747691" ]
0.586284
1
download and save historical + new dataset in complete dataset
def save_today_up_to_date_local_copy(url_hist, url_new):
    df_historical = download_datasets_csv(url_hist)
    df_new = download_datasets_csv(url_new)

    # creating complete dataset
    complete_dataset = df_historical.append(df_new, ignore_index=True)

    # saving dataset
    today = date.today().strftime("%d_%m_%Y")
    complete_dataset.to_csv(f"complete_dataset_{today}.csv", sep=",", index=False)

    return complete_dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_data_and_save():\n url = 'https://github.com/djay/covidthailand/wiki/combined.csv'\n s=requests.get(url).content\n global df\n global last_updated\n df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n df.to_parquet(file_name, compression='UNCOMPRESSED')\n df.to_csv('jaydata.csv')\n last_updated = df['Date'][df.index[-1]].strftime(\"%d %B %Y\")\n\n url = 'https://raw.githubusercontent.com/wiki/djay/covidthailand/vaccinations.csv'\n s=requests.get(url).content\n global vac_df\n vac_df=pd.read_csv(io.StringIO(s.decode('utf-8')), parse_dates= ['Date'])\n vac_df.to_parquet('vaccination.parquet', compression='UNCOMPRESSED')\n\n print(\"Data downloaded and saved successfully. Data up to \" + last_updated)", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", 
\"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "def download_dataset(self):\n raise NotImplementedError", "def save_data(self):\n # Command to get the download data\n pass", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def _download_data(self):\n logger.info('Downloading ChemIDplus data...')\n outfile_path = self._src_data_dir / self._src_fname\n\n self._ftp_download(self._src_server,\n self._src_dir_path,\n self._src_data_dir,\n self._src_fname)\n\n parser = ET.iterparse(outfile_path, ('start', 'end'))\n date = next(parser)[1].attrib['date']\n version = date.replace('-', '')\n outfile_path.rename(self._src_data_dir / f'chemidplus_{version}.xml')\n logger.info('Finished downloading ChemIDplus data')", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n 
self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def download_prep(symbol: str, start_date: str, end_date: str):\n #download data and save \n download = Downloader(symbol,start_date, end_date)\n download.save()\n\n #prep data for NN input\n #check if file exist first\n file_path = \"./data/{}/quotes.csv\"\n if os.path.isfile(file_path.format(symbol)):\n feature = Feature_Selection.read_csv(symbol, file_path.format(symbol))\n feature.calculate_features()\n feature.normalize_data()\n feature.save_stock_data()\n feature.save_normalized_data()\n else:\n print(\"File does not exist:\",file_path.format(symbol))", "def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! 
While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")", "def download_report():\n entities = get_names()\n save_csv(entities)", "def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)", "def download_entire_db(storage_path=DEFAULT_STORAGE,\n remove_previous=True,\n return_df=False,\n return_latest_date=False,\n write=['feather']):\n # first check if we have the latest data\n if not os.path.exists(storage_path):\n splitpath = storage_path.split('/')[1:] # first entry is blank due to home dir /\n for i, p in enumerate(splitpath, 1):\n path = '/'.join(splitpath[:i])\n if not os.path.exists(path):\n os.mkdir(path)\n\n zip_file_url = 'https://www.quandl.com/api/v3/databases/EOD/data?api_key=' + Q_KEY\n s = req.Session()\n s.mount('https', HTTPAdapter(max_retries=10))\n r = s.get(zip_file_url)\n # another possible way to deal with retries\n # while True:\n # try:\n # r = req.get(zip_file_url, timeout=10)\n # break\n # except Exception as e:\n # print(e)\n\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path=storage_path)\n df = pd.read_csv(storage_path + \\\n z.filelist[0].filename,\n names=HEADERS,\n index_col=1,\n parse_dates=True,\n infer_datetime_format=True)\n latest_date = df.index.max().date().strftime('%Y%m%d')\n\n if 'hdf5' in write:\n df.to_hdf(storage_path + 'EOD_' + latest_date + '.h5',\n key='data',\n complib='blosc',\n complevel=9)\n\n # also write feather file so can read into R\n # have to reset the index because feather can't handle non-default index (maybe non-unique?)\n df.reset_index(inplace=True)\n if 'feather' in write:\n df.to_feather(storage_path + 'EOD_' + latest_date + '.ft')\n\n if remove_previous:\n for ext in ['h5', 'ft']:\n files = glob.glob(storage_path + 'EOD_*.' 
+ ext)\n files = [f for f in files if len(f.split('/')[-1]) == 15] # don't want any of the small files, only full DBs\n print(sorted(files, key=os.path.getctime))\n if len(files) > 1:\n previous_file = sorted(files, key=os.path.getctime)[-2]\n print('removing', previous_file)\n os.remove(previous_file)\n\n # delete downloaded zip file\n os.remove(storage_path + z.filelist[0].filename)\n\n if return_df:\n # set index back to normal for return_df\n df.set_index('Date', inplace=True)\n return df\n elif return_latest_date:\n return pd.to_datetime(df['Date'].max().date())", "def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])", "def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st", "def to_walden(self):\n with tempfile.NamedTemporaryFile() as f:\n # fetch the file locally\n files.download(self.source_data_url, f.name)\n\n # add it to walden, both locally, and to our remote file cache\n add_to_catalog(self.metadata, f.name, upload=True)", "def download(data_root, version):\n if version not in GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys():\n raise ValueError(\n f\"A valid dataset version is required. Available versions are:\"\n f\"{GroceriesReal.GROCERIES_REAL_DATASET_TABLES.keys()}\"\n )\n dest_path = os.path.join(\n data_root, GroceriesReal.LOCAL_PATH, f\"{version}.zip\"\n )\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n extract_folder = os.path.join(data_root, GroceriesReal.LOCAL_PATH)\n if os.path.exists(dest_path):\n logger.info(\"The dataset file exists. Skip download.\")\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError:\n logger.info(\n \"The checksum of the previous dataset mismatches. 
\"\n \"Delete the previously downloaded dataset.\"\n )\n os.remove(dest_path)\n if not os.path.exists(dest_path):\n source_uri = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].source_uri\n GroceriesReal._download_http(source_uri, dest_path, version)\n GroceriesReal._extract_file(dest_path, extract_folder)", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def download(args):\n with_dataset(args, Dataset._download)", "def download_dailydialog(daily_raw_fname: str, data_path: str):\n wget.download(daily_raw_fname, data_path)\n # Manually unzip the train/dev/test files", "def fetch_save_data_for_region(reg_url):\n # get region page\n region_page = requests.get(reg_url)\n region_name = reg_url.split(\"/\")[-1].split(\".\")[0]\n region_dir = \"data/\" + region_name\n # store the data files\n if not os.path.exists(region_dir):\n os.makedirs(region_dir)\n \n print(\"starting process for\", reg_url)\n \n soup = BeautifulSoup(region_page.text, \"html.parser\")\n # create metadata array\n rows = soup.select(\"table#hor-minimalist-b tr\")\n metadata_array = [get_info_from_row(rows[i]) for i in range(1, len(rows))]\n metadata_array = [m for m in metadata_array if m is not None]\n metadata_lookup = {m[\"id\"]:m for m in metadata_array if m is not None}\n \n data_urls = [m[\"href\"] for m in metadata_array]\n for d in data_urls:\n download_file(d, region_dir+\"/\")\n print(\"len data urls:\", len(data_urls))\n\n # unzip all the files\n unzip_dir = os.getcwd() + \"/\" + region_dir\n for item in os.listdir(unzip_dir): # loop through items in dir\n if item.endswith(\".zip\"): # check for \".zip\" extension\n file_name = unzip_dir + \"/\" + item # get full path of files\n #print(file_name)\n try:\n zip_ref = zipfile.ZipFile(file_name) # create zipfile object\n zip_ref.extractall(unzip_dir) # extract file to dir\n except:\n print(\"unzip, error with file\", file_name)\n zip_ref.close() # close file\n \n \n # load dataframes individually, concat, save\n data_files = [f for f in os.listdir(region_dir) if f.endswith(\".csv\")]\n print(\"len data files\", len(data_files))\n\n data_frames = []\n for d in data_files:\n try:\n new_frame = pd.read_csv(region_dir + \"/\" + d) \n id_ = d.replace(\".csv\", \" \").replace(\"_\", \" \").strip()\n metadata = metadata_lookup[id_]\n new_frame[\"desc\"] = metadata[\"desc\"]\n new_frame[\"type\"] = metadata[\"type\"]\n new_frame[\"med_ec\"] = metadata[\"med_ec\"]\n new_frame[\"flow\"] = metadata[\"flow\"]\n new_frame[\"lat\"] = metadata[\"lat\"]\n new_frame[\"lon\"] = 
metadata[\"lon\"]\n \n data_frames.append(new_frame) \n except:\n print(\"load csv, error with file\", d)\n data = pd.concat(data_frames)\n print(\"data shape\", data.shape)\n data.to_csv(\"processed_data/\" + region_name +\".csv\", index=False)", "def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? [y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")", "def download_dataset(target_dataset, comet):\n data_paths = list(get_data_paths().values())[0]\n data_store = StoreManager(path=data_paths)\n\n logging.info('STARTING tar download')\n comet.log_dataset_info(name=target_dataset, version=None, path=data_paths)\n start = time.time()\n data_store.download_file(target_dataset)\n end = time.time()\n logging.info('DOWNLOAD time taken: ' + str(end - start))\n comet.log_dataset_hash(target_dataset)\n if target_dataset.endswith('.tar.gz'):\n logging.info('STARTING untarring')\n tf = tarfile.open(target_dataset)\n tf.extractall()\n logging.info('COMPLETING untarring')", "def _download(self, path):\n self.logger.info('Getting Million Song Dataset...')\n self.logger.info('Downloading Echo Nest Taste Subprofile train data...')\n base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'\n\n download_dataset(\n base_url + 'train_triplets.txt.zip',\n join(self.data_folder, 'train.zip')\n )\n rename(join(self.data_folder, 'train'), path)\n\n self.logger.info('Downloading evaluation data for MSD Challenge...')\n download_dataset(\n base_url + 'EvalDataYear1MSDWebsite.zip',\n join(path, 'eval.zip')\n )\n rename(\n join(path, 'EvalDataYear1MSDWebsite'),\n join(path, 'evaluation')\n )\n\n self.logger.info('Downloading list of matching errors...')\n url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'\n download_url(url, join(path, 'sid_mismatches.txt'))", "def _download_training_data_from_data_asset_storage(self) -> 'DataFrame':\n\n # it means that it is on ICP env and it is before fit, so let's throw error\n if not self._wml_client:\n raise CannotReadSavedRemoteDataBeforeFit()\n\n # note: as we need to load a data into the memory,\n # we are using pure requests and helpers from the WML client\n asset_id = self.location.href.split('?')[0].split('/')[-1]\n\n # note: download data asset details\n asset_response = requests.get(self._wml_client.data_assets._href_definitions.get_data_asset_href(asset_id),\n params=self._wml_client._params(),\n headers=self._wml_client._get_headers(),\n verify=False)\n\n asset_details = self._wml_client.data_assets._handle_response(200, u'get assets', 
asset_response)\n\n # note: read the csv url\n if 'handle' in asset_details['attachments'][0]:\n attachment_url = asset_details['attachments'][0]['handle']['key']\n\n # note: make the whole url pointing out the csv\n artifact_content_url = (f\"{self._wml_client.data_assets._href_definitions.get_wsd_model_attachment_href()}\"\n f\"{attachment_url}\")\n\n # note: stream the whole CSV file\n csv_response = requests.get(artifact_content_url,\n params=self._wml_client._params(),\n headers=self._wml_client._get_headers(),\n stream=True,\n verify=False)\n\n if csv_response.status_code != 200:\n raise ApiRequestFailure(u'Failure during {}.'.format(\"downloading model\"), csv_response)\n\n downloaded_asset = csv_response.content\n\n # note: read the csv/xlsx file from the memory directly into the pandas DataFrame\n buffer = io.BytesIO(downloaded_asset)\n data = try_load_dataset(buffer=buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n\n return data\n else:\n # NFS scenario\n connection_id = asset_details['attachments'][0]['connection_id']\n connection_path = asset_details['attachments'][0]['connection_path']\n\n return self._download_data_from_nfs_connection_using_id_and_path(connection_id, connection_path)", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def download_data(self):\n headers = {'User-Agent': 'Mozilla/5.0',}\n\n #Request for html data of url page\n r = requests.get(self.url, headers = headers, allow_redirects=True)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n #Checking if folder path exists, if not, creats it\n i=0\n while i<len(self.folder)-1:\n if self.folder[i] == '/':\n if not os.path.isdir(self.folder[:i]):\n os.mkdir(self.folder[:i])\n i+=1\n if i==len(self.folder)-1:\n if not os.path.isdir(self.folder):\n os.mkdir(self.folder)\n\n # if not os.path.isdir(self.folder):\n # os.mkdir(self.folder)\n\n #Gets every href to zip file with data\n entries = []\n for link in soup.find_all('a'):\n if re.search(\"^data/.*.zip\", link.get('href')):\n entries.append(link.get('href'))\n\n #Gets the newest dataset\n self.getCurrentData(entries)\n\n i=0\n #Saves each file in dataset\n for list in self.ListOfZipFiles:\n if not os.path.isfile(self.folder+list[4:]):\n r = requests.get(self.url+list)\n open(self.folder+list[4:], 'wb').write(r.content)\n #deletes prefix \"data/\"\n self.ListOfZipFiles[i] = list[4:]\n i+=1", "def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")", "def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)", "def get_local_dataset(\n self, \n file_name: str\n ):\n pd.read_csv(file_name)\n #save", "def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- 
| sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)", "def import_data(loc,first_year, last_year):\n import zipfile\n import requests\n import urllib.request\n \n # Copy a network object to a local file\n\n \n table = [\"Prix\", \"Stations\", \"Services\"]\n \n for i in range(first_year, last_year + 1):\n for t in table :\n i = str(i)\n ti = t + i\n tiz = ti+\".zip\"\n ltiz = loc + tiz\n r = requests.get(ltiz)\n open(tiz, 'wb').write(r.content)\n with zipfile.ZipFile(tiz,\"r\") as zip_ref:\n zip_ref.extractall(\"data\")", "def download_files():\n #delete old files\n dataPath = Path(Path(os.getcwd()) / \"data\")\n for filename in dataPath.glob(\"*\"):\n os.unlink(filename)\n\n #download new files\n print(\"Downloading files...\")\n try:\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_confirmed.csv -p data -q\")\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_deaths.csv -p data -q\")\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_recovered.csv -p data -q\")\n print(\"Downloading files finished\")\n except:\n print(\"Error downloading files\")", "def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def fetch_training_data(filename, output, db_url=None):\n r2dt.write_training_data(filename, db_url, output)", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref 
= zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts", "def download_data_slot(self):\n if self.result is None:\n self.label_current_message.setText('尚未有預測結果!請確認是否已載入資料並執行預測。')\n else:\n fileName, _ = QFileDialog.getSaveFileName(self, 'Save file', '', '*.csv') # 建立儲存檔案的對話盒(dialog)\n if fileName:\n self.result['date'] = pd.to_datetime(self.result['date'])\n raw_input_data = self.Data.copy() # 需要把原資料copy,否則直接取用的話,輸出結果會隨著下載次數而無謂增加\n output_data = raw_input_data.append(self.result.loc[:, ['date'] + [i for i in self.column_name]])\n output_data.to_csv(fileName, index = None)", "def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)", "def add_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n if 'INTERVENTION' in data.columns:\n data = data[data['INTERVENTION'] == 0]\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)\n self.con.commit()", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def update_dati(lista_inquinanti, base_url):\n \n anno = str(datetime.datetime.now().year)\n return download_data(base_url, [anno], lista_inquinanti)", "def download_data(start_date, end_date, gr, instr, path):\n _from = f'{start_date.isoformat()}T00:00:00Z'\n _to = f'{end_date.isoformat()}T00:00:00Z'\n gran = gr\n instr_ = instr\n\n params = {\n \"granularity\": gran,\n \"from\": _from,\n \"to\": _to\n }\n \n 
print(f'Saving to path: {path}')\n \n with open(f'{path}/{instr}_{gran}_{start_date.isoformat()}_{end_date.isoformat()}.csv', \"w\") as O:\n for r in InstrumentsCandlesFactory(instrument=instr_, params=params):\n print(\"REQUEST: {} {} {}\".format(r, r.__class__.__name__, r.params))\n rv = client.request(r)\n cnv(r.response, O)\n \n print('Finished')", "def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')", "def set_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)\n self.con.commit()", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def download(self):\n pass", "def download(self):\n pass", "def run_save_historical_data():\n data = get_historical_submissions(TEST_SUBREDDIT, TEST_MAX)\n save_historical_submission_comments(data, TEST_SUBREDDIT + '_past_30_months_comments.csv')", "def download_intraday_extended(conn, logger, slice='year1month1'):\n # 下载地址\n url_pattern = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol={}&interval=5min&slice={}&adjusted=true&apikey=\" + api_key\n Symbol = 'UVXY'\n path_root = 'stocks/data/'\n logger.info(f'Start downloading slice {slice}')\n # 判断curStocks表是否为空,否,则遍历curStocks,是,则将Stocks表中的所有股票代码插入curStocks表,遍历Stocks\n # 判断curStocks表是否为空\n cursor0 = conn.cursor()\n cursor0.execute('select count(*) from curStocks')\n result0 = cursor0.fetchall()\n count = int(result0[0][0])\n cursor0.close()\n if count == 0:\n # 遍历Stocks表\n cursor1 = conn.cursor()\n cursor1.execute('select count(*) from Stocks')\n result1 = cursor1.fetchall()\n count = int(result1[0][0])\n cursor1.execute('insert into curStocks (Symbol) select Symbol from Stocks')\n cursor1.close()\n conn.commit()\n\n while count > 0:\n # 遍历curStocks表\n cursor1 = conn.cursor()\n cursor1.execute('SELECT Symbol FROM curStocks')\n result = cursor1.fetchall()\n cursor1.close()\n\n for line in result:\n Symbol = line[0].strip()\n logger.info(f'Current stock code: {Symbol}')\n\n # 下载地址url\n url = url_pattern.format(Symbol, slice)\n logger.debug(url)\n\n try:\n # 把下载地址发送给requests模块\n f = requests.get(url, timeout=10) # 设置超时\n\n # 下载文件\n path = f'{path_root}{Symbol}_{slice}.csv'\n logger.debug(f'File saved to: {path}')\n with open(path, \"wb\") as code:\n code.write(f.content)\n\n except Exception as e:\n logger.debug(e)\n logger.error(Symbol + '下载失败')\n # time.sleep(random.randint(30, 60))\n continue\n # logger.debug(curDate)\n # 取出上次的数据日期\n cursor0 = conn.cursor()\n cursor0.execute('select max(timestamp) from IntradayQuotes where Symbol = ?;', Symbol)\n result0 = cursor0.fetchall()\n cursor0.close()\n oldDate = result0[0][0] if result0[0][0] else None\n logger.info(f'Last record in database: {oldDate}')\n # 写入数据库\n with open(path, 'r') as csvfile:\n next(csvfile)\n cursor2 = conn.cursor()\n read = csv.reader(csvfile) # 逐行读取csv文件,并写入\n for i, one_line in enumerate(read):\n newDate = datetime.datetime.strptime(one_line[0], '%Y-%m-%d %H:%M:%S')\n if oldDate and newDate <= oldDate:\n logger.info(f'Imported {i} new records.')\n break\n cursor2.execute(\"INSERT INTO IntradayQuotes VALUES (?,?,?,?,?,?,?)\", (\n Symbol, one_line[0], one_line[1], one_line[2], one_line[3], one_line[4], one_line[5]))\n else:\n logger.info(f'Imported {i + 1} new records.')\n cursor2.close()\n\n # 更新curStocks表\n cursor3 = conn.cursor()\n 
cursor3.execute(\"delete from curStocks where Symbol = ?\", Symbol)\n cursor3.close()\n\n conn.commit()\n wait_time = random.randint(10, 30)\n logger.debug(f'Waiting for {wait_time} seconds to continue...')\n time.sleep(wait_time)\n\n # 检查curStocks是否还有未下载的股票\n cursor4 = conn.cursor()\n cursor4.execute('select count(*) from curStocks')\n result4 = cursor4.fetchall()\n count = int(result4[0][0])\n cursor4.close()\n if count > 0:\n logger.info('本轮下载失败数量:' + str(count))\n\n logger.info(f'Slice {slice} has been downloaded.')", "def portfolio_download_data(tickers: List[str], dates: List[str],\n time_step: str) -> None:\n\n try:\n function_name: str = portfolio_download_data.__name__\n download_data_tools \\\n .function_header_print_data(function_name, tickers, dates,\n time_step)\n\n init_year = int(dates[0].split('-')[0])\n init_month = int(dates[0].split('-')[1])\n fin_year = int(dates[1].split('-')[0])\n fin_month = int(dates[1].split('-')[1])\n last_day = monthrange(fin_year, fin_month)[1]\n\n init_date: dt = dt(year=init_year, month=init_month, day=1)\n fin_date: dt = dt(year=fin_year, month=fin_month, day=last_day)\n\n # Not all the periods can be combined with the time steps.\n raw_data: pd.DataFrame = \\\n yf.download(tickers=tickers, start=init_date, end=fin_date,\n interval=time_step)['Adj Close']\n # Order DataFrame columns by sector\n raw_data = raw_data[tickers]\n\n if raw_data.isnull().values.any():\n # Remove stocks that do not have data from the initial date\n raw_data = raw_data.dropna(axis=1, thresh=len(raw_data) - 10) \\\n .fillna(method='ffill')\n\n download_data_tools.save_data(raw_data, dates, time_step)\n\n except AssertionError as error:\n print('No data')\n print(error)", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def _downloadDataFile(self):\n config = SiteConfiguration.objects.get()\n\n with requests.Session() as s:\n # Authentication\n data = {\n 'identificationBean.identifiant': '{}'.format(config.login),\n 'identificationBean.mdp': '{}'.format(config.password),\n 'userName': '{}'.format(config.username)\n }\n url = 'http://extranet.ffbb.com/fbi/identification.do'\n s.post(url, data=data)\n\n # Create filters\n params = (\n ('action', 'executeCsv'),\n ('rechercherRencontreSaisieResultatBean.idDivision', ''),\n ('rechercherRencontreSaisieResultatBean.rechercherEquipe2', 'O'),\n ('rechercherRencontreSaisieResultatBean.dateDebutRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.dateFinRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.idPoule', ''),\n ('rechercherRencontreSaisieResultatBean.numeroEquipe', ''),\n )\n\n # Get Csv file\n url = 'http://extranet.ffbb.com/fbi/rechercherCompetitionRencontre.do'\n response = s.get(url, params=params)\n\n if(response.headers['content-type'] != 'application/ms-excel;charset=UTF-8'):\n return False\n\n # Create the file\n if response.status_code == 200:\n os.makedirs(os.path.dirname(settings.DATA_PATH), exist_ok=True)\n with open(settings.DATA_PATH, 'wb') as f:\n for chunk in response:\n f.write(chunk)\n\n return True", "def _download_to_df(url, table_name, year, month):\n # Insert the table_name, year and month into the url.\n url = url.format(table=table_name, year=year, month=str(month).zfill(2))\n # Download the file.\n r = requests.get(url)\n if r.status_code != 200:\n 
raise _MissingData((\"\"\"Requested data for table: {}, year: {}, month: {} \n not downloaded. Please check your internet connection. Also check\n http://nemweb.com.au/#mms-data-model, to see if your requested\n data is uploaded.\"\"\").format(table_name, year, month))\n # Convert the contents of the response into a zipfile object.\n zf = zipfile.ZipFile(io.BytesIO(r.content))\n # Get the name of the file inside the zip object, assuming only one file is zipped inside.\n file_name = zf.namelist()[0]\n # Read the file into a DataFrame.\n data = pd.read_csv(zf.open(file_name), skiprows=1)\n # Discard last row of DataFrame\n data = data[:-1]\n return data", "def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)", "def _download_all(update_path=True, verbose=None):\n\n # iterate over dataset\n for ds in dataset_list:\n # call download\n ds().download(update_path=True, verbose=verbose, accept=True)", "def execute():\n \n # URL to the datasets part of SHARKdata.\n sharkdata_url = u'http://sharkdata.se/datasets/'\n\n # Download a list of all available datasets. The JSON format is used.\n datasets = json.load(urllib2.urlopen(sharkdata_url + u'list.json'))\n \n # Exit if no datasets are found. \n if len(datasets) < 1:\n print(u'No datasets found. Script terminated.')\n return\n\n # Print some info for all available datasets.\n print(u'\\nAvailable datasets on SHARKdata:' + u'\\n')\n for dataset in datasets:\n print(u' Datatype: ' + dataset[u'datatype'] + u' Name: ' + dataset[u'dataset_name'])\n \n # Get the name of the first dataset in the list.\n dataset_name = datasets[0][u'dataset_name']\n \n # Download header and data and print the content. 
The text format is used.\n print(u'\\nPrint dataset content for: ' + dataset_name + u'\\n')\n header_and_data = urllib2.urlopen(sharkdata_url + dataset_name + u'/data.txt')\n \n for row in header_and_data:\n # The text format character encoding is cp1252 (equal to windows-1252).\n row = row.decode(u'cp1252')\n# print(row.strip())\n\n # Download header and data and save to file.\n dataset_name = datasets[0][u'dataset_name']\n filename = datasets[0][u'dataset_file_name'].replace(u'.zip', u'.txt')\n character_encoding = u'utf8' # Some alternatives: cp1252, utf-8, utf-16, ascii, latin1, macroman.\n row_delimiter = u'\\r\\n'\n print(u'\\nDataset content for: ' + dataset_name + u' to file: ' + filename + u'\\n')\n out_file = None\n try:\n out_file = codecs.open(filename, mode = 'w', encoding = character_encoding)\n header_and_data = urllib2.urlopen(sharkdata_url + dataset_name + u'/data.txt')\n for row in header_and_data:\n row = row.decode(u'cp1252')\n out_file.write(row.strip() + row_delimiter)\n finally:\n if out_file: out_file.close()", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def download_files(self):", "def get_latest_data():\n try:\n print '\\nRequesting new data.....\\n'\n response = get(\"https://api.myjson.com/bins/2csub\")\n if response.status_code is 200:\n print '\\nSuccess (200) in downloading data\\n'\n current_json = response.json()\n set_backup_data(current_json)\n else: \n current_json = get_backup_data()\n except ConnectionError:\n current_json = get_backup_data()\n return current_json", "def fetch_inspect_data(filename, output, db_url=None):\n r2dt.write_training_data(filename, db_url, output)", "def fetch_syn_data():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n url = 'https://dl.dropboxusercontent.com/u/5918983/'\n t1 = url + 't1.nii.gz'\n b0 = url + 'b0.nii.gz'\n \n folder = pjoin(dipy_home, 'syn_test')\n\n md5_list = ['701bda02bb769655c7d4a9b1df2b73a6', # t1\n 'e4b741f0c77b6039e67abb2885c97a78'] # b0\n\n url_list = [t1, b0]\n fname_list = ['t1.nii.gz', 'b0.nii.gz']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading t1 and b0 volumes from the same session (12MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)", "def update_data():\n etf_prices = get_prices(start=START_DATE, end=END_DATE)\n etf_returns = compute_returns(etf_prices)\n merged_etf_data = etf_prices.merge(etf_returns, right_index=True, left_index=True)\n indicators = compute_indicators(merged_etf_data) # this uses the \"ta\" lib, but it does not need\n # to be imported\n merged_etf_data = merged_etf_data.merge(indicators, right_index=True, left_index=True)\n vix_data = get_vix()\n data = merged_etf_data.merge(vix_data, right_index=True, left_index=True)\n data.to_csv('Data/database.csv')\n return", "def get_data(self, url):\n # Initialize the button that needs to be pressed to get download the data\n button = None\n # While this button is of type 'None' we reload the browser\n while button is None:\n try:\n # Navigate to the URL\n self.go_to_url(url)\n # Sleep the code by the defined time plus a random number of seconds between 0s and 2s. 
This should\n # reduce the likelihood that Google detects us as a scraper\n time.sleep(self.sleep + 2 * np.random.rand(1))\n # Try to find the button and click it\n line_chart = self.browser.find_element_by_css_selector(\n \"widget[type='fe_line_chart']\")\n button = line_chart.find_element_by_css_selector(\n '.widget-actions-item.export')\n button.click()\n except exceptions.NoSuchElementException:\n # If the button cannot be found, try again (load page, ...)\n pass\n # After downloading, wait again to allow the file to be downloaded\n time.sleep(self.sleep)\n # Load the data from the csv-file as pandas.DataFrame object\n data = pd.read_csv(self.filename, skiprows=1)\n # Set date as index:\n if 'Day' in data.columns:\n data.Day = pd.to_datetime(data.Day)\n data = data.set_index(\"Day\")\n frequency = 'Daily'\n elif 'Week' in data.columns:\n data.Week = pd.to_datetime(data.Week)\n data = data.set_index(\"Week\")\n frequency = 'Weekly'\n else:\n data.Month = pd.to_datetime(data.Month)\n data = data.set_index(\"Month\")\n frequency = 'Monthly'\n # Sleep again\n time.sleep(self.sleep)\n # Delete the file\n while os.path.exists(self.filename):\n try:\n os.remove(self.filename)\n except:\n pass\n return data, frequency", "def download_dataset(urls, path):\n\n # check if the path exist or not\n os.makedirs(os.path.normpath(path), exist_ok=True)\n\n # Download the dataset\n for key in urls:\n _L(\"Downloading \" + _P(urls[key]) + \" in \" + _S(path))\n # if (urls[key].split('.')[-1] != 'tar'):\n os.system(\"wget {} -P {}\".format(urls[key], path))", "def get_data(retrieve = False, start='2019-01-01', comp = False):\r\n if retrieve == True:\r\n tickers = retrieve_sp500()\r\n else:\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n if not os.path.exists('sp500_data'):\r\n os.mkdir('sp500_data')\r\n exchg_close = dt.time(16,0,0,0)\r\n # use todays date if markets have closed.\r\n if dt.datetime.today().time() > exchg_close:\r\n end = dt.datetime.now()\r\n # use yesterdays dates if markets have not yet closed.\r\n else: \r\n end = dt.datetime.now() - dt.timedelta(1)\r\n for ticker in tickers:\r\n # updates data for tickers not currently stored.\r\n if not os.path.exists('sp500_data/{}.csv'.format(ticker)):\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # updates data for tickers that have not been updated today.\r\n elif dt.datetime.fromtimestamp(os.path.getmtime('sp500_data/{}.csv'.format(ticker))).day != dt.datetime.today().day:\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # prints out data that was not and does not need udpating.\r\n else:\r\n print('{} is already saved'.format(ticker))\r\n if comp == True:\r\n compile_data()", "def download_compressed_dataset(url):\n raise NotImplementedError", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def pull(self, domain=\"data.cityofchicago.org\",dataset_id=\"crimes\",\\\n token=\"ZIgqoPrBu0rsvhRr7WfjyPOzW\",store=True, out_fname=\"pull_df.p\",\n pull_all=False):\n\n pdb.set_trace()\n client = 
Socrata(domain, token)\n if domain == \"data.cityofchicago.org\" and dataset_id==\"crimes\":\n self._coord1 = \"latitude\"\n self._coord2 = \"longitude\"\n self._EVENT = \"primary_type\"\n\n if pull_all:\n new_data = client.get(dataset_id)\n # pull_df = pd.DataFrame(new_data).dropna(\\\n # subset=[self._coord1, self._coord2, self._DATE, self._EVENT],\\\n # axis=1).sort_values(self._DATE)\n # NOTE: running into columns encoded in unicode not accepting subset\n # specific filtering of NaN's by column error\n # columns defined in subset aren't columns in the pulled DataFrame\n pull_df = pd.DataFrame(new_data).dropna().sort_values(self._DATE)\n self._logdf = pull_df\n else:\n self._logdf.sort_values(self._DATE)\n pull_after_date = \"'\"+str(self._logdf[self._DATE].iloc[-1]).replace(\\\n \" \", \"T\")+\"'\"\n new_data = client.get(dataset_id, where=\\\n (\"date > \"+pull_after_date))\n if domain == \"data.cityofchicago.org\" and dataset_id==\"crimes\":\n self._DATE = \"date\"\n # pull_df = pd.DataFrame(new_data).dropna(\\\n # subset=[self._coord1, self._coord2, self._DATE, self._EVENT],\\\n # axis=1).sort_values(self._DATE)\n pull_df = pd.DataFrame(new_data).dropna().sort_values(self._DATE)\n self._logdf = self._logdf.append(pull_df)\n\n if store:\n assert out_fname is not None, \"Out filename not specified\"\n self._logdf.to_pickle(out_fname)\n\n return", "def download(uri: str) -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")", "def __download(self, year, month, day):\n print 'Download...'\n logging.info('[download]->Download...')\n t = datetime.datetime(year, month, day)\n spdata.download(stime=t, stations=self.aodSetting.stations, ftp_dir=self.aodSetting.ftp_root, data_dir=self.aodSetting.dd_dir, ftp_ip=self.aodSetting.ftp_ip,\n user=self.aodSetting.ftp_user, pword=self.aodSetting.ftp_psw)\n print 'Download Done!'\n logging.info('[download]->Download Done!')", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def download_latest_customers(self):\n file_id, data = self._gdrive.get_last_customer_data_file()\n self._datastore.force_data_storage('customer', data)", "def make_all_files_historical():\n sql=\"UPDATE files SET is_history=1\"\n execute_query(sql)\n sql=\"UPDATE files SET should_instrument=0\"\n execute_query(sql)", "def _download_training_data_from_file_system(self) -> 'DataFrame':\n\n try:\n url = f\"{self._wml_client.wml_credentials['url']}/v2/asset_files/{self.location.path.split('/assets/')[-1]}\"\n # note: stream the whole CSV file\n csv_response = requests.get(url,\n params=self._wml_client._params(),\n headers=self._wml_client._get_headers(),\n stream=True,\n verify=False)\n\n if csv_response.status_code != 200:\n raise ApiRequestFailure(u'Failure during {}.'.format(\"downloading model\"), csv_response)\n\n downloaded_asset = csv_response.content\n # note: 
read the csv/xlsx file from the memory directly into the pandas DataFrame\n buffer = io.BytesIO(downloaded_asset)\n data = try_load_dataset(buffer=buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n except (ApiRequestFailure, AttributeError):\n with open(self.location.path, 'rb') as data_buffer:\n data = try_load_dataset(buffer=data_buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n\n return data", "def download_data(filename, merge, java_application):\n oldrecords = []\n oldfile = None\n\n\n\n if merge:\n try:\n oldfile = open(filename, 'rb')\n except IOError:\n logging.info('No file to merge. Creating new file %s',\n filename)\n if oldfile:\n logging.info('Merging with existing file %s', filename)\n oldrecords = loader.UnpickleFromFile(oldfile)\n oldfile.close()\n if oldrecords:\n\n last_timestamp = oldrecords[0].start_timestamp_milliseconds()\n records = loader.FromMemcache(filter_timestamp=last_timestamp,\n java_application=java_application)\n else:\n records = loader.FromMemcache(java_application=java_application)\n\n merged_records = records + oldrecords\n try:\n outfile = open(filename, 'wb')\n except IOError:\n logging.error('Cannot open %s', filename)\n return\n loader.PickleToFile(merged_records, outfile)\n outfile.close()", "def download_stock_data(symbol, interval, period):\n\tprices = yf.Ticker(symbol).history(period=period, interval=interval)\n\tif len(prices) > 0:\n\t\tresult_file = '../data/' + symbol + '.csv'\n\t\tprices.to_csv(result_file)\n\t\tprint(f\"Downloaded stock data of {stocks[s]}, data shape {prices.shape}, saved as {result_file}\")", "def download_and_prepare(self):\n self._download_and_prepare()", "def get_all_binance_modified(symbol, kline_size, save=True, client=Client()):\n\n filename = 'history/%s-%s-data.csv' % (symbol, kline_size)\n if os.path.isfile(filename):\n data_df = pd.read_csv(filename)\n else:\n data_df = pd.DataFrame()\n oldest_point, newest_point = minutes_of_new_data(symbol, kline_size, data_df, source=\"binance\", client=client)\n oldest_point = datetime.strptime('23 Sep 2021', '%d %b %Y')\n delta_min = (newest_point - oldest_point).total_seconds() / 60\n available_data = math.ceil(delta_min / binsizes[kline_size])\n print(oldest_point)\n if oldest_point == datetime.strptime('1 Jan 2017', '%d %b %Y'):\n print('Downloading all available %s data for %s. Be patient..!' % (kline_size, symbol))\n else:\n print('Downloading %d minutes of new data available for %s, i.e. %d instances of %s data.' 
% (\n delta_min, symbol, available_data, kline_size))\n klines = client.get_historical_klines(symbol, kline_size, oldest_point.strftime(\"%d %b %Y %H:%M:%S\"),\n newest_point.strftime(\"%d %b %Y %H:%M:%S\"))\n data = pd.DataFrame(klines,\n columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_av',\n 'trades', 'tb_base_av', 'tb_quote_av', 'ignore'])\n data['timestamp'] = pd.to_datetime(data['timestamp'], unit='ms')\n if len(data_df) > 0:\n temp_df = pd.DataFrame(data)\n data_df = data_df.append(temp_df)\n else:\n data_df = data\n data_df.set_index('timestamp', inplace=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n if save and os.path.exists('./history'): data_df.to_csv(filename)\n print('All caught up..!')\n data_df.index = pd.to_datetime(data_df.index, utc=True)\n data_df = data_df[~data_df.index.duplicated(keep='last')]\n return data_df.astype(float)", "def downloadData(url):\n response = urllib2.urlopen(url)\n html = response.read()\n localfile = open('hitdata.csv', 'wb')\n localfile.write(html)\n localfile.close()", "def download_data(self, filename=None):\n if (filename is None): filename = ['Public','Gathering.dat']\n elif (type(filename) is str): filename = [filename]\n elif (type(filename) is list): pass\n else: raise TypeError('Require the file path (\\'Public/Gathering.dat\\')')\n\n self.newportxps.ftpconn.connect(**self.newportxps.ftpargs)\n remote_path = posixpath.join(self.newportxps.ftphome, *filename)\n self.newportxps.ftpconn.cwd(remote_path)\n self.newportxps.ftpconn.save(posixpath.basename(remote_path), posixpath.basename(remote_path))\n self.newportxps.ftpconn.close()", "def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n logging.info(\"Fetch housing data.....\")\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "def download(self):\n \n if not os.path.exists(self.directory):\n os.mkdir(self.directory)\n if not os.path.exists(self.fullPath):\n os.mkdir(self.fullPath)\n \n dm = pymodis.downmodis.downModis(self.fullPath, self.password, self.username, self.url, self.tiles, self.path, self.dataset, \n self.today, self.enddate, jpg = False, debug = True, timeout = 30)\n dm.connect()\n self.filelist = dm.getListDays() \n self.observations = len(dm.getListDays()) \n \n if self.dataset != 'MOD13Q1.005':\n if self.observations % 2 != 0:\n raise IOError(\"The total number of observations through time must be an even number. Please add or remove an observation before or after %s\" % str(self.filelist[0]))\n \n dm.downloadsAllDay()\n logger.log('SUCCESS', 'Downloading is complete! 
%d HDF files of %s data for tiles %s were downloaded for the following days: %s' % (self.observations*len(self.tiles), str(self.dataset), str(self.tiles), str(self.filelist)))", "def download_dataset(base_dir, scene):\n\n # setup depends on dataset\n if len(scene.split('_')) == 1: # default\n modality, part = None, None # declaration necessary for instatiation check\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene]['gt']['name'])\n \n elif len(scene.split('_')) == 3: # AeroRIT\n scene, modality, part = scene.split('_')\n base_dir = Path(base_dir).expanduser().joinpath(scene)\n filepath_data = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['img']['name'])\n filepath_labels = base_dir.joinpath(DATASETS_CONFIG[scene][modality]['gt']['name'])\n else :\n raise RuntimeError('Given scene unknown!')\n\n base_dir.mkdir(parents=True, exist_ok=True)\n\n # download data and load from file\n if filepath_data.suffix == '.mat': # datasets from ehu.es\n if not filepath_data.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_data)) as t:\n url = DATASETS_CONFIG[scene]['img']['url']\n urlretrieve(url, filename=filepath_data, reporthook=t.update_to)\n\n if not filepath_labels.is_file():\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\n desc=\"Downloading {}\".format(filepath_labels)) as t:\n url = DATASETS_CONFIG[scene]['gt']['url']\n urlretrieve(url, filename=filepath_labels, reporthook=t.update_to)\n \n data = loadmat(filepath_data)[DATASETS_CONFIG[scene]['img']['key']]\n labels = loadmat(filepath_labels)[DATASETS_CONFIG[scene]['gt']['key']]\n\n elif filepath_data.suffix == '.tif': # aerorit\n if not filepath_data.is_file(): # download image if necessary\n print(\"Downloading {}\".format(filepath_data))\n url = DATASETS_CONFIG[scene][modality]['img']['url']\n gdown.download(url=url, output=str(filepath_data), quiet=False)\n\n if not filepath_labels.is_file(): # download labels if necessary\n print(\"Downloading {}\".format(filepath_labels))\n url = DATASETS_CONFIG[scene][modality]['gt']['url']\n gdown.download(url=url, output=str(filepath_labels), quiet=False)\n \n # extract part of image as defined in Rangnekar et al.\n base_dir = base_dir.joinpath(modality).joinpath(part)\n base_dir.mkdir(parents=True, exist_ok=True)\n \n # check early if data exists already to avoid unecessarily loading and encoding data\n filepath_hdf = base_dir.joinpath(f'aerorit_{modality}_{part}.h5')\n if filepath_hdf.is_file():\n return filepath_hdf\n\n # extract defined part of dataset\n start_col = DATASETS_CONFIG[scene][part]['start_col']\n end_col = DATASETS_CONFIG[scene][part]['end_col']\n \n data = np.transpose(io.imread(filepath_data), (1,2,0))[53:,7:,:]\n data = data[:, start_col:end_col, :]\n\n labels = encode_labelmap(io.imread(filepath_labels), AERORIT_COLOURLABELMAP)[53:,7:]\n labels = labels[:, start_col:end_col]\n filepath_data = filepath_hdf\n\n filepath_hdf = filepath_data.with_suffix('.h5')\n \n # export data and labels to hdf\n if not filepath_hdf.is_file():\n with h5py.File(filepath_hdf, \"w\") as f:\n f.create_dataset(\"data\", data=data)\n f.create_dataset(\"labels\", data=labels)\n f.attrs['scene'] = scene\n if not modality is None:\n f.attrs['modality'] = modality\n if not part is None:\n f.attrs['part'] = part\n return filepath_hdf\n\n return filepath_hdf", "def retrieve_data(file: str, landing_path: 
str, local: bool) -> bool:\n\n base_url = \"https://files.training.databricks.com/static/data/health-tracker/\"\n url = base_url + file\n driverPath = \"file:/databricks/driver/\" + file\n dbfsPath = landing_path + file\n if local:\n urlretrieve(url, landing_path + file)\n else:\n urlretrieve(url, file)\n dbutils.fs.mv(driverPath, dbfsPath)\n return True", "def store_stock_data(stock_name = 'TSLA'):\n stonk = yf.Ticker(stock_name) # gets stock data from yahoo\n hist = stonk.history(period=\"max\") # historical stock prices\n hist.reset_index(inplace=True) # takes the date stamp out of the index column\n hist.rename(columns = {'Date':\"DateTime\"},inplace=True) # Changes the name of the date column\n hist['DateTime'] = pd.to_datetime(hist['DateTime'],utc=True) # Changes the timestamps to UTC\n hist.to_csv('../data/raw/'+stock_name+'_stock_price.csv')\n return", "def _get_data(self):\n try:\n \n with open('auto-mpg.data.txt', 'w') as data_file:\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.debug(f'response code from url: 200')\n self.response_code = 200\n for line in r.iter_lines():\n data_file.write(line.decode() + '\\n')\n else:\n self.response_code = r.status_code\n logger.info(f'{url} returned status code {r.status_code}')\n except Exception as e:\n logger.info(f'Unexpected error writing to file {str(e)}. Exiting.')\n sys.exit()", "def download(directory: str) -> None:\n path = f'{directory}/m5/datasets'\n if not os.path.exists(path):\n download_file(directory=path,\n source_url=M5.source_url,\n decompress=True)", "def download_data(base_url,\n lista_anni,\n lista_inquinanti):\n \n # Inizializziamo la lista dei df ognuno dei quali corrisponde ad un inquinante\n df_template = pd.DataFrame(columns=['jd','h','1','2','3','4','5','6','7','8','9','10','11','13','14','15','16','38','39','40',\n '41','45','47','48','49','55','56','57','60','83','84','85','86','87','Anno','Inquinante'])\n lista_df = [df_template]\n\t\n\t# Per ogni inquinante\n for chimico in lista_inquinanti:\n \t# Per ogni anno\n for anno in lista_anni:\n print('Retrieving {} for year {} from {}'.format(chimico, anno, compose_url(base_url, anno, chimico)))\n \n # Esegui la richiesta\n r = requests.get(compose_url(base_url, anno, chimico))\n\n # Crea il rispettivo dataframe\n df = write_response(r)\n print('{} rows'.format(len(df)))\n\t\t\t\n\t\t\t# Prendi la linea che corrisponde all'header del df\n columns_ = df.iloc[0].index[0]\n \n \"\"\" Individua i nomi delle colonne splittando la stringa che li contiene tutti\n ed escludendo lestringhe vuote ottenute tramite lo split\"\"\"\n clean_columns = [item.strip()\\\n for item in columns_.split(' ')\\\n if len(item)!=0]\n \n # aggiungo le colonne Anno e Inquinante\n columns = clean_columns + ['Anno', 'Inquinante']\n\t\t\t\n list_rows = []\n # Per ogni linea del df\n for line_idx in range(1, len(df)):\n \t\n # Come nel caso precedente splitto la linea per ottenere le diverse celle\n line = df.iloc[line_idx].values[0].strip().split(' ')\n \n # Quindi ottengo la lista delle celle della riga i-th\n raw_line = [item for item in line if len(item)!=0] \n \n # Aggiungiamo le colonne anno e inquinante\n list_rows += [raw_line + [anno, chimico]]\n\t\t\t\n\t\t\t# Definiamo il nuovo dataset \n df_idx = pd.DataFrame(list_rows, columns=columns)\n \n # Creiamo aggiungiamo alla lista di df da concatenare quello appena creato \n lista_df += [df_idx]\n\n\t# Facciamo la union dei df 
tenendo conto che le colonne possono essere diverse (concat con pandas)\n df_final = pd.concat(lista_df, ignore_index=False)\n\n # sostituisco i NaN e -999.0 con un valore vuoto\n df_final = df_final.fillna('')\n df_final = df_final.replace(to_replace='-999.0', value='')\n \n return df_final", "def download_france_data():\n start = time.time()\n oc19_file = \"opencovid19-fr-chiffres-cles.csv\"\n gouv_file = \"data-gouv-fr-chiffres-cles.csv\"\n oc19_url = \"https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv\"\n gouv_url = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n # run requests to download and save the data\n myfile = requests.get(oc19_url)\n with open(oc19_file, \"wb\") as f:\n f.write(myfile.content)\n file = requests.get(gouv_url)\n with open(gouv_file, \"wb\") as f:\n f.write(file.content)\n # Load both csv into pandas\n data = pd.read_csv(oc19_file)\n data_gouv = pd.read_csv(gouv_file)\n # Fill in some of the metadata that is not present in the government data\n data_gouv[\"granularite\"] = \"pays\"\n data_gouv[\"maille_code\"] = \"FRA\"\n data_gouv[\"maille_nom\"] = \"France\"\n data[\"source_nom\"] = \"Santé publique France Data\"\n data_gouv[\"source_url\"] = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n data_gouv.rename(DATA_GOUV_2_OPEN, axis=\"columns\", inplace=True)\n end = time.time()\n print(\"Time spent on download_france_data: {0:.5f} s.\".format(end - start)) \n return pd.concat((data, data_gouv), join=\"outer\")", "def historicalData(self, reqId: int, bar: BarData):\n with open('realtime_data/data.csv', 'a') as file:\n file.write(f'{bar.date},{bar.open},{bar.high},{bar.low},{bar.close},{bar.barCount},{bar.volume},{bar.average}\\n')", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)" ]
[ "0.7490032", "0.70159507", "0.6900648", "0.673739", "0.6722774", "0.66758853", "0.65556735", "0.6517671", "0.6412614", "0.63694435", "0.633438", "0.6323673", "0.63144374", "0.62915635", "0.62536067", "0.6218456", "0.6188079", "0.61649686", "0.61501503", "0.61264765", "0.61185014", "0.6113233", "0.6097618", "0.6097548", "0.6079537", "0.605391", "0.60319406", "0.6021103", "0.6020125", "0.60187244", "0.59877616", "0.5987622", "0.5959927", "0.5959638", "0.59557605", "0.5954742", "0.59345704", "0.59333336", "0.59328824", "0.5928049", "0.59274423", "0.59191203", "0.5898002", "0.588356", "0.5862359", "0.58568686", "0.58540386", "0.58524317", "0.5843443", "0.58352154", "0.5833594", "0.5832066", "0.5825745", "0.5809855", "0.5808269", "0.5808269", "0.57705814", "0.57599396", "0.57585853", "0.5754954", "0.57375765", "0.5737133", "0.5736197", "0.5735039", "0.5714117", "0.57047445", "0.5699959", "0.5697813", "0.56916565", "0.56906265", "0.5677307", "0.567365", "0.56726646", "0.56686217", "0.5668521", "0.56649864", "0.5660857", "0.5660165", "0.5653046", "0.56480926", "0.5622462", "0.56213945", "0.5621286", "0.5616942", "0.56137425", "0.5612712", "0.5612534", "0.5609672", "0.560948", "0.5602116", "0.5593671", "0.55934674", "0.5593012", "0.5589282", "0.5579375", "0.5575549", "0.55739105", "0.557301", "0.556754", "0.5563744" ]
0.73869425
1
replacing data in Google Cloud Table
def replace_data_in_gbq_table(project_id, table_id, complete_dataset): complete_dataset.to_gbq( destination_table=table_id, project_id=project_id, credentials=credentials, if_exists="replace", ) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_data(\n table_id: str = typer.Option(..., help=\"The id of the table\"),\n test: bool = typer.Option(False, help=\"Whether is a test or not\")\n) -> None:\n base_path = Path.cwd().parent\n\n tbl = bd.Table(dataset_id=\"br_cgu_servidores_executivo_federal\", table_id=table_id)\n output_path = f\"output/{table_id}\" if not test else f\"output/test/{table_id}_test\"\n tbl.create( # pylint: disable=no-value-for-parameter\n path=str(base_path / output_path),\n if_table_exists=\"pass\",\n if_storage_data_exists=\"replace\",\n if_table_config_exists=\"pass\"\n )", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def htable_put(table, key, value):", "def on_update_seatable(data, index, *args):\n row = convert_row(metadata, data)\n print(row)", "def replace(self, table_name, data):\n fields = map((lambda s: \"`\" + str(s) + \"`\"), data.keys())\n values = map(self.quote, data.values())\n curs = self.q(\n \"REPLACE INTO `{0}` ({1}) VALUES({2})\".format(table_name, \", \".join(fields), \", \".join(values)), True)\n last_id = curs.lastrowid\n curs.close()\n return last_id", "def update(table, id_):\n\n # your code\n key = common.check_for_key(id_,table)\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n return_inputs = ui.get_inputs(['Name', 'Age'], 'Enter New Values')\n modif_index = key\n\n table[modif_index][NAME] = return_inputs[FIRST_PROP]\n table[modif_index][AGE] = return_inputs[SECOND_PROP]\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n return table", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def update_vluln_table():", "def replace_rows(table_id, fd, startLine=None):\n\n media_body = MediaIoBaseUpload(fd, mimetype='application/octet-stream')\n\n service = build_service(\"fusiontables\", \"v2\")\n table = service.table()\n command = table.replaceRows(tableId=table_id,\n media_body=media_body,\n startLine=startLine)\n try:\n command.execute()\n except HttpError as e:\n print(e)", "def prescribing_transform(row):\n # To match the prescribing table format in BigQuery, we have\n # to re-encode the date field as a bigquery TIMESTAMP and drop\n # a couple of columns\n row[10] = \"%s 00:00:00\" % row[10]\n del(row[3])\n del(row[-1])\n return row", "def table_callback(table, data, event, column=False):\n if column:\n table.object = data.loc[event.new]\n else:\n table.object = data.loc[event.new:event.new]", "def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)", "def update(table, id_):\n\n # your code\n\n return table", "def update(table, id_, record):\n\n index_id = 0\n record.insert(index_id, common.generate_random(table))\n table.append(record)\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n\n return table", "def replace(self, table, _id, indata, fail_on_empty=True):\n try:\n with self.lock:\n for i, _ in self._find(table, self._format_filter({\"_id\": _id})):\n break\n else:\n if fail_on_empty:\n raise DbException(\"Not found entry with _id='{}'\".format(_id), HTTPStatus.NOT_FOUND)\n return None\n self.db[table][i] = deepcopy(indata)\n return {\"updated\": 1}\n except DbException:\n 
raise\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def callUpdateTable(self):\r\n self.updateTable()", "def mark_obsolete_in_dataset( dataset_name, engine, table ):\n s = table.select( table.c.dataset_name==dataset_name ) \n result = conn.execute(s) # all rows of replica.files with the specified dataset_name\n\n sr = []\n srf = {}\n for row in result:\n # Note that you can loop through result this way only once.\n sr.append(row)\n fn = filename(row)\n if fn in srf:\n srf[fn].append(row)\n else:\n srf[fn] = [row]\n\n #sr.sort( key=filename )\n\n for fn,rows in srf.items():\n if len(rows)<=1: continue\n rows.sort( key=rowversion )\n print \"jfp will keep abs_path=\",rows[-1]['abs_path'],\"status=\",rows[-1]['status'],\\\n \"dataset_name=\",rows[-1]['dataset_name']\n for row in rows[0:-1]:\n abs_path = row['abs_path']\n dataset_name = \"old_\"+row['dataset_name']\n print \"jfp will do update for abs_path=\",abs_path,\"status from\",row['status'],\"to 50\"\n s = table.update().where( table.c.abs_path==abs_path ).\\\n values( status=50 )\n #if dataset_name.find('old_old_')!=0:\n # s = table.update().where( table.c.abs_path==abs_path ).\\\n # values( dataset_name=dataset_name )\n # ... doesn't work, you first have to create a row in replica.datasets with this name.\n result = conn.execute(s)", "def put_data(data):\n at_write = airtable.Airtable(app.config['AIRTABLE_BASE'],\n app.config['AIRTABLE_WRITE_KEY'])\n return at_write.create(app.config['AIRTABLE_TABLE'] , data)", "def update_records(cursor,table_schema,table_name,column_name,value):\n update_records = \"UPDATE \" + table_schema + \".\" + table_name + \" SET \" + column_name + \"='\" + value + \"' WHERE COALESCE(\" + column_name + \",'')='';\"\n cursor.execute(update_records)", "def process(tr, parameters, tableBuilder):\n user = parameters.get(\"user\")\n if not user == None:\n tr.setUserId(user)\n\n sampleID = parameters.get(\"identifier\")\n sample = tr.getSampleForUpdate(sampleID)\n\n properties = parameters.get(\"properties\")\n \n for prop in properties.keySet():\n \tsample.setPropertyValue(prop, properties.get(prop))", "def update_table(dd_value, df):\n df = pd.read_json(df, orient=\"split\")\n return summary_table_tmp_rh_tab(df, dd_value)", "def execute(self, context):\n\n # Initialize PostgreSQL hook\n self.postgres = PostgresHook(\n postgres_conn_id=self.postgres_conn_id,\n schema=self.postgres_schema).get_sqlalchemy_engine()\n\n # Initialize Socrata hook\n super().execute()\n\n # Load table\n table = self._select_table()\n self.table_dicts = [dict(row) for row in table]\n\n if self.replace:\n result = self.socrata.replace(self.dataset_id, self.table_dicts)\n else:\n # Code from etl-airflow\n for i in range(0, len(self.table_dicts), UPLOAD_CHUNK_SIZE):\n try:\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])\n except:\n print(f\"Error on record {i}\")\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])", "def main():\n\n\n\n\n try:\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n # Specify Google Translation Spreadsheet\n spreadsheetId = 'ID' # Enter ID\n rangeName = 'surveys_locale!A2:L'\n\n result = service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = 
result.get('values', []) #column headers, table and specify rows\n\n #convert values into dataframe\n df = pd.DataFrame(values)\n\n #replace all non trailing blank values created by Google Sheets API with null values\n df_replace = df.replace([''], [None]) \n\n #convert back to list to insert into MySQL\n processed_dataset = df_replace.values.tolist() \n\n\n if not values:\n print('No data found.')\n else:\n with connection.cursor() as cursor: \n\n\n\n # CREATE translation table\n\n print('Creating translation_table table...')\n \n cursor.execute(\"\"\"CREATE TABLE `translation_table` (\n `tokens` varchar(255) NULL,\n `survey_group_name` varchar(255) COLLATE utf8_bin NULL,\n `en-US` varchar(255) COLLATE utf8_bin NULL default null,\n `nb-NO` varchar(255) COLLATE utf8_bin NULL default null,\n `sv-SE` varchar(255) COLLATE utf8_bin NULL default null,\n `de-DE` varchar(255) COLLATE utf8_bin NULL default null,\n `es-ES` varchar(255) COLLATE utf8_bin NULL default null,\n `pt-PT` varchar(255) COLLATE utf8_bin NULL default null,\n `fr-FR` varchar(255) COLLATE utf8_bin NULL default null,\n `da-DK` varchar(255) COLLATE utf8_bin NULL default null,\n `fi-FI` varchar(255) COLLATE utf8_bin NULL default null,\n `zh-CN` varchar(255) COLLATE utf8_bin NULL default null\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;\n\n \"\"\")\n\n\n # INSERT VALUES IN TABLE\n\n print('Inserting records into Translation table')\n\n #Iterate through the dataframe list and insert into MySQL row by row\n for keyrow, row in enumerate(processed_dataset):\n\n insert_sql = \"\"\"INSERT INTO trybe_stats.`translation_table` (`tokens`, `survey_group_name`, `en-US`, `nb-NO`, `sv-SE`, `de-DE`, `es-ES`, `pt-PT`, `fr-FR`, `da-DK`, `fi-FI`, `zh-CN`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(insert_sql, [row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11]])\n\n else:\n print('No rows found')\n\n print('Finished inserting.')\n\n\n # COUNT NUMBER OF ROWS\n\n cursor.execute(\"SELECT COUNT(*) from trybe_stats.`translation_table`\")\n result=cursor.fetchone()\n print(result.values()) #returns a dictionary, so values() gets the values which is the row count\n\n\n # connection is not autocommit by default. 
So you must commit to save\n # your changes.\n connection.commit()\n\n\n finally:\n connection.close()", "async def put(self, key, data, table_name='opsdroid_default'):\n _LOGGER.debug(\"Putting %s into PostgreSQL table %s\", key, table_name)\n\n json_data = json.dumps(data, cls=JSONEncoder)\n\n async with self.connection.transaction():\n key_already_exists = await self.get(key, table_name=table_name)\n if key_already_exists:\n await self.connection.execute(\n \"UPDATE {} SET data = $2 WHERE key = $1\".format(table_name),\n key, json_data\n )\n else:\n await self.connection.execute(\n \"INSERT INTO {} VALUES ($1, $2)\".format(table_name),\n key, json_data\n )", "def update(table, id_):\n os.system('clear')\n table_dict = common.creat_dict_from_table(table)\n\n if id_ in list(table_dict.keys()):\n list_labels = [\"Month: \", \"Day: \", \"Year: \", \"Type: \", \"Amount: \"]\n title = \"Please provide product information\"\n updated_record = ui.get_inputs(list_labels, title)\n updated_record.insert(0, table_dict[id_][0])\n table_dict[id_] = updated_record\n table = list(table_dict.values())\n data_manager.write_table_to_file(\"store/games.csv\", table)\n else:\n ui.print_error_message(\"There is no such element.\")\n return table", "def update(self, table_name, data, id_column_name='id'):\n table = self._create_table(table_name)\n for row in data:\n try:\n statement = table.update() \\\n .where(table.c[id_column_name] == row[id_column_name]) \\\n .values(**row)\n self.cursor.execute(statement)\n except Exception as e:\n print (e)", "def ProcessTableOverwrite(ref, args, request):\n dataset_id = ref.datasetId\n table_id = ref.Name()\n project_id = ref.projectId\n\n if args.overwrite:\n if _TableExists(dataset_id, table_id, project_id):\n _TryDeleteTable(dataset_id, table_id, project_id)\n\n return request", "def update(table, id_):\n id_storage = common.get_values_from_column(table, 0)\n if id_ in id_storage:\n table = manage_data_from_user(table, id_storage, id_, True)\n # Here u can make changes:\n\n else:\n ui.print_error_message('This option does not exist.')\n\n return table", "def update_mass(self, table_name, field, data):\n sql_tpl_start = \"UPDATE `{0}` SET `{1}` = CASE \\n\".format(table_name, field)\n sql_tpl_end = \"ELSE `{0}` \\n END\".format(field)\n\n sqls = []\n for case in data:\n sqls.append(\"WHEN {0} THEN {1} \\n\".format(case, self.quote(data[case])))\n\n if len(sqls)%50 == 0:\n self.q(sql_tpl_start + \"\".join(sqls) + sql_tpl_end)\n sqls = []\n\n if len(sqls):\n self.q(sql_tpl_start + \"\".join(sqls) + sql_tpl_end)", "def update_old_row(self, data):\n for key, value in data.items():\n _column = self._labels.index([v['display'] for k, v in self.headers.items() if k == key].pop())\n cell = self.item(self._opt_row, _column)\n _cell_data = cell.get_data()\n _cell_data[key] = value\n\n cell.set_content(value, _cell_data)", "def update_row(table_str, attribute_value_dict, keys_dict): #works\n sql = make_update_row(table_str, attribute_value_dict, keys_dict)\n #print sql\n execute_edit_queries(sql)", "def populate_dyn(self, table):\n myrow = table.row\n myrow[\"sample_time\"] = int(time.time() - glob.base_time)\n myrow[\"available_bike_stands\"] = self.available_bike_stands\n myrow[\"available_bikes\"] = self.available_bikes\n myrow[\"last_update\"] = self.last_update\n myrow[\"status\"] = self.status\n myrow.append()\n table.flush()", "def update(table, id_):\n\n new_data = ui.get_inputs(\n [\"TITLE\", \"PRICE\", \"MONTH\", \"DAY\", \"YEAR\"],\n \"Please enter the new data to 
update\"\n )\n\n if common.confirm_option():\n\n ID = 0\n\n for game in table:\n if game[ID] == id_:\n for game_data_index in range(len(new_data)):\n game[game_data_index + 1] = new_data[game_data_index]\n\n return table", "def update(table, id_):\n # 4\n for row in table:\n if row[0] == id_:\n addnew = ui.get_inputs(\n ['month: ',\n 'day: ',\n 'year: ',\n 'type (in=income, out= outflow): ',\n 'amount (of transaction in USD): '],\n 'Updating item in Accounting table')\n addnew.insert(0, id_)\n row = addnew\n data_manager.write_table_to_file('accounting/items.csv', table)\n\n return table", "def mock_datatable_put(self, request):\n\n data = DTResilientMock.get_datatable_rows(\n self.mock_data_table_updated_rows)[0]\n\n return create_response(request,\n status_code=200,\n content=b(dumps(data)))", "def copy_and_replace_keys(self, table, key_callback):\n client = self.bq_client\n t = client.get_table(table)\n\n cross_joins = []\n\n # begin query generation process\n q = f'CREATE OR REPLACE TABLE `{table}` AS (\\nSELECT \\n'\n for field in t.schema:\n q += process_field(field, None, key_callback)\n cross_joins.extend(process_cross_joins(field, \"copy_table\"))\n q = q.strip(\",\\n\")\n q += f\"\\nFROM\\n `{table}` copy_table\"\n\n for cross_join in cross_joins:\n q += cross_join\n q += \")\"\n\n return q", "def test_rt_table(self) -> None:\n expected = Fixtures.next_table()\n expected.description = '\"hello!\" said no one'\n expected.tags.sort()\n\n self.get_proxy().put_table(table=expected)\n actual: Table = self.get_proxy().get_table(table_uri=checkNotNone(expected.key))\n actual.last_updated_timestamp = None\n actual.tags.sort()\n\n self.assertEqual(expected, actual)", "def replace_into(self, table_name, values):\n\n # This is safe: https://stackoverflow.com/questions/835092/\n # python-dictionary-are-keys-and-values-always-the-same-order\n column_names = list(values.keys())\n values = list(values.values())\n\n # Dynamically build the query\n # Be aware that the %s is NOT string formatting but parameter binding\n query = 'REPLACE INTO ' + table_name + ' (' + ', '.join(column_names) + \\\n ') VALUES (' + ', '.join(['%s'] * len(column_names)) + ')'\n\n # Execute the query and commit the results\n self.execute_query(query, tuple(values))\n\n return self.cursor.lastrowid", "def update_geonet_data():\n rs = TremorData()\n rs.update()", "def update(tablename: str, data: dict):\n try:\n if (t := tablenameRev[tablename]) not in sequenceTables:\n return False\n t.query.filter_by(id=data.pop(\"id\")).update(data)\n db.session.commit()\n del_cache_for_sequence_table(tablename)\n return True\n except:\n return False", "def update(self, mode=\"all\"):\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == \"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n 
object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )", "def upload_table(tablebogus, n, append, bogus):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n fake = Faker()\n storage_client = storage.Client()\n bucket = storage_client.bucket(\"brick-layer-testing\")\n outstring = \"\"\n for i in range(0, int(n)):\n populated = {}\n for record in tablebogus:\n # print(ast.literal_eval(record[\"args\"]))\n if record[\"dist\"] == \"fk\":\n record[\"fk_type\"] = getffktype(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n record[\"bq_type\"] = getfbqtype(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n record[\"fk_args\"] = getfargs(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n if (record[\"fk_type\"] == \"past_datetime\"):\n populated[record[\"field_name\"]] = getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]))).isoformat()\n elif (record[\"fk_type\"] == \"random_element\"):\n populated[record[\"field_name\"]] = random.choice(ast.literal_eval(json.dumps(record[\"fk_args\"][\"elements\"][1:-1].split(\", \"))))\n elif (record[\"fk_type\"] == \"paragraph\"):\n record[\"fk_args\"] = {\n \"nb_sentences\": 3,\n \"variable_nb_sentences\": True\n }\n # print(json.dumps(record[\"fk_args\"]).replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"true\", \"True\"))\n populated[record[\"field_name\"]] = getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]).replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"true\", \"True\")))\n # print(ast.literal_eval(record[\"args\"])[\"elements\"][1:-1].split(\", \"))\n # print(populated[record[\"name\"]])\n elif (record[\"fk_type\"] == \"longitude\" or record[\"fk_type\"] == \"latitude\"):\n populated[record[\"field_name\"]] = float(getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]))))\n else:\n # print(record)\n populated[record[\"field_name\"]] = getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"])))\n outstring += json.dumps(populated) + \"\\n\"\n\n # purg_filename = tablebogus[0]['table_name']+datetime.now().isoformat()+\".txt\"\n # file = open(purg_filename, \"w\")\n # file.write(outstring)\n blob = bucket.blob(tablebogus[0]['table_name'])\n\n blob.upload_from_string(outstring)\n # os.remove(purg_filename)\n\n print(\n ''' \n -> {} uploaded to The Cloud.\n\n '''.format(\n tablebogus[0]['dataset']+\".\"+tablebogus[0]['table_name']\n )\n )\n\n from google.cloud import bigquery\n client = bigquery.Client()\n dataset_ref = tablebogus[0]['dataset']\n\n ids = []\n for i in list(client.list_datasets()):\n ids.append(i.dataset_id)\n if dataset_ref in ids:\n dataset_ref = client.dataset(dataset_ref)\n else:\n dataset_ref = client.create_dataset(dataset_ref) # Make an API request.\n # print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n print(\" -> This is where I am :: \" + tablebogus[0]['table_name'])\n # dataset_ref = client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n sch_lst = []\n for field in tablebogus:\n sch_lst.append(bigquery.SchemaField(field['field_name'], field['bq_type']))\n if append:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND\n else:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n 
job_config.schema = sch_lst\n job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n uri = \"gs://brick-layer-testing/\"+tablebogus[0]['table_name']\n\n load_job = client.load_table_from_uri(\n uri,\n dataset_ref.table(tablebogus[0]['table_name']),\n location=\"US\", # Location must match that of the destination dataset.\n job_config=job_config,\n ) # API request\n print(\" -> Starting to move shit over to BQ {}\".format(load_job.job_id))\n\n load_job.result() # Waits for table load to complete.\n # print(\"Job finished.\")\n\n destination_table = client.get_table(dataset_ref.table(tablebogus[0]['table_name']))\n print(\" -> There are {} bogus rows.\".format(destination_table.num_rows))\n\n blob = bucket.blob(tablebogus[0]['table_name'])\n blob.delete()\n print(\" -> Tidying up...\")\n # extract schema.json\n # make fakeout.json\n # upload to cloud storage\n # move from cloud storage to bq", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def sit(self, table):\n self.table = table", "def update_database(self):\r\n \r\n self.initgta()\r\n \r\n credentials = self.get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\r\n 'version=v4')\r\n service = discovery.build('sheets', 'v4', http=http,\r\n discoveryServiceUrl=discoveryUrl)\r\n\r\n spreadsheetId = '1Avxh9i3ObSn7rf8iA75JBwdmdWRis7FS8WezsO9E6sE'\r\n rangeName = 'Statistiken (Details) #2017!A1:M'\r\n result = service.spreadsheets().values().get(\r\n spreadsheetId=spreadsheetId, range=rangeName, valueRenderOption='FORMULA').execute()\r\n values = result.get('values', [])\r\n \r\n rangeName = 'Statistiken (Details) #2018!A1:M'\r\n result = service.spreadsheets().values().get(\r\n spreadsheetId=spreadsheetId, range=rangeName, valueRenderOption='FORMULA').execute()\r\n values += result.get('values', [])\r\n \r\n players = [None] * 7\r\n points = [None] * 7\r\n game = ''\r\n playDate = 0\r\n raceid = None\r\n playlistid = None\r\n \r\n if not values:\r\n print('No data found.')\r\n else:\r\n for row in values:\r\n #print('%s' % row)\r\n \r\n if(len(row) >= 9 and row[8] != None):\r\n # new playlist\r\n self.insertPlaylist(row[8], row[9])\r\n \r\n if len(row) >= 8 and row[2] != None and row[2] != \"\":\r\n # new race\r\n if raceid == None:\r\n isCanceled = False\r\n if row[1] == None or row[1] == \"\":\r\n isCanceled = True\r\n playlistid = int(self.getCurrentPlaylistId())\r\n racenumber = self.getNextRaceNumber(playlistid)\r\n self.insertRace(playlistid, racenumber, isCanceled)\r\n raceid = self.getCurrentRaceId()\r\n rank = 0\r\n \r\n # new raced\r\n rank = row[1]\r\n player = row[2].lower()\r\n vehicle = row[3]\r\n racetime = row[4]\r\n bestlap = row[5]\r\n money = row[6] \r\n \r\n self.checkPlayer(player)\r\n self.addVehicle(vehicle)\r\n self.insertRaced(raceid, rank, bestlap, racetime, vehicle, player, money, \"\")\r\n rank += 1\r\n else:\r\n playlistid = None\r\n raceid = None\r\n \r\n # 2019 sheet has a different structure\r\n #rangeName = 'Statistiken (Details) 
#2019!A2:N'\r\n #rangeName2020 = 'Statistiken (Details) #2020!A2:N'\r\n rangeNames = ['Statistiken (Details) #2019!A2:N', 'Statistiken (Details) #2020!A2:N']\r\n result2019 = service.spreadsheets().values().batchGet(\r\n spreadsheetId=spreadsheetId, ranges=rangeNames, valueRenderOption='FORMULA').execute()\r\n rangesV2 = result2019.get('valueRanges', [])\r\n print('{0} ranges retrieved.'.format(len(rangesV2)))\r\n\r\n\r\n for range in rangesV2:\r\n if not range:\r\n print('No data found.')\r\n else:\r\n values = range.get('values', [])\r\n print('{0} values in range retrieved.'.format(len(values)))\r\n for row in values:\r\n #print('Row: %s' % row)\r\n \r\n if(len(row) >= 13 and row[12] != None):\r\n # new playlist\r\n print('%s' % row)\r\n if(len(row) < 14):\r\n print(\"missing date for playlist \" + str(row[12]))\r\n playlistdate = ''\r\n else:\r\n playlistdate = str(row[13])\r\n #print('new v2 playlist ' + str(row[12]) + ' - ' + playlistdate)\r\n self.insertPlaylist(row[12], playlistdate)\r\n\r\n if len(row) >= 12 and row[8] != None:\r\n # new race\r\n #print(\"new race\")\r\n vehicle = row[11]\r\n isCanceled = False\r\n if row[1] == None or row[1] == \"\":\r\n isCanceled = True\r\n playlistid = int(self.getCurrentPlaylistId())\r\n racenumber = self.getNextRaceNumber(playlistid)\r\n self.insertRaceWithMetadata(playlistid, racenumber, isCanceled, row[8], row[9], row[10], row[11])\r\n raceid = self.getCurrentRaceId()\r\n rank = 0\r\n rankmod = 0\r\n\r\n if len(row) >= 7 and row[1] != None and row[2] != None and row[2] != \"\":\r\n # new raced\r\n #print(\"new raced\")\r\n rank = row[1]\r\n player = row[2].lower()\r\n wrongvehicle = row[3]\r\n if wrongvehicle == \"x\":\r\n rankmod += 1\r\n rank = 0\r\n racetime = row[4]\r\n bestlap = row[5]\r\n money = row[6]\r\n \r\n self.checkPlayer(player)\r\n self.addVehicle(vehicle)\r\n self.insertRaced(raceid, rank - rankmod, bestlap, racetime, vehicle, player, money, wrongvehicle)\r\n rank += 1\r\n\r\n # close\r\n self.conn.commit()\r\n self.end()", "def savedict(self, obj, table):\n if not isinstance(obj, dict): return False\n\n keys = ['`%s`' % key for key in obj.keys()]\n values = [None if value == '' else value for value in obj.values()]\n\n sql = 'REPLACE INTO %s (%s) VALUES (%s)' % (table, ','.join(keys), ','.join(['%s'] * len(values)))\n self.execute(sql, values)", "def savedict(self, obj, table):\n\t\tif not isinstance(obj, dict): return False\n\n\t\tkeys = ['`%s`' % key for key in obj.keys()]\n\t\tvalues = [None if value == '' else value for value in obj.values()]\n\n\t\tsql = 'REPLACE INTO %s (%s) VALUES (%s)' % (table, ','.join(keys), ','.join(['%s'] * len(values)))\n\t\tself.execute(sql, values)", "def update(self, table, col, new_val, key, key_val):\n\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n if(type(new_val) == str):\n c.execute(\"UPDATE {tn} SET {cn}='{val}' WHERE {key}={key_val}\".format(tn=table, cn=col, val=new_val, key=key, key_val=key_val))\n else:\n c.execute(\"UPDATE {tn} SET {cn}= {val} WHERE {key}={key_val}\".format(tn=table, cn=col, val=new_val, key=key, key_val=key_val))\n conn.commit()\n conn.close()", "def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'", "def s3_table_set_before_write(cls, table):\n\n update_default = cls.s3_table_name_update_default\n\n 
table._before_insert.append(update_default)\n table._before_update.append(lambda s, data: update_default(data))", "def upsert_table(self, df_diff, n_batch=5000, debug=False):\n\n n_items = len(df_diff)\n queries = []\n upsert_query = ' '.join(\n ('INSERT INTO \"{tablename}\"(\"cartodb_id\", \"{colname}\")',\n 'VALUES ({cartodb_id}, {colval})',\n 'ON CONFLICT (\"cartodb_id\")',\n 'DO UPDATE SET \"{colname}\" = {colval}',\n 'WHERE EXCLUDED.\"cartodb_id\" = {cartodb_id};'))\n n_batches = n_items // n_batch\n batch_num = 1\n for row_num, row in enumerate(df_diff.iteritems()):\n # if debug: print(row)\n cartodb_id = row[0][0]\n colname = row[0][1]\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n # fill query template\n temp_query = upsert_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(self.loc[cartodb_id][colname],\n pgtype),\n cartodb_id=cartodb_id)\n\n queries.append(temp_query)\n\n # run batch if at n_batch queries, or at last item\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n batchquery = '\\n'.join(queries)\n print(\"{curr_batch} of {n_batches}\".format(\n curr_batch=batch_num,\n n_batches=n_batches))\n batch_num = batch_num + 1\n if debug: print(\"Num chars in batch: {}\".format(len(batchquery)))\n if debug: print(batchquery)\n\n # send batch query to carto\n resp = self.carto_sql_client.send(batchquery)\n if debug: print(resp)\n\n # clear for another batch\n queries = []\n\n return None", "def abstract_write(self, *params): \n section = self.table_section_from_parameter(*params)\n row_args = self.table_row_args_from_parameter(*params)\n new_row = self.table_row_class(*row_args)\n # IMPORTANTE! ASUMO QUE UN BLOQUE INVALIDO ES IGUAL A UNO VACIO!\n # TO DO: PREGUNTAR EN ISSUE\n found_index = False\n for index, table_row in enumerate(section.copy()): \n if table_row is None or not table_row.valid:\n found_index = True\n overwrite_index = index\n break\n\n if not found_index:\n overwrite_index = self.next_replace(section)\n\n replace_index = self.index_mapping_from_parameter(overwrite_index, *params)\n\n\n old_line = self.table[replace_index]\n #print(f\"{self.__class__.__name__} Replace -> Index: {replace_index}\")\n\n # Perfom the actual write\n self.table[replace_index] = new_row", "def insert_data():\n table = create_new_table()\n filename = '/home/nineleaps/Downloads/covid_info_.csv'\n dataset_ref = client.dataset(table.dataset_id)\n table_ref = dataset_ref.table(table.table_id)\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n job.result()\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, table.dataset_id, table.table_id))", "def before_update(mapper, conn, target):\n\n if target.id_ is None:\n\n if target.table:\n table_on = ObjectNumber.parse(target.table.vid)\n else:\n table_on = ObjectNumber.parse(target.t_vid)\n\n if not target.vid:\n target.vid = str(ColumnNumber(table_on, target.sequence_id))\n\n if not target.id_:\n target.id_ = str(ColumnNumber(table_on, target.sequence_id).rev(None))\n\n target.d_vid = str(ObjectNumber.parse(target.t_vid).as_dataset)", "def update(table, id_):\n\n count=0\n searched_index=-1\n in_it=False\n for i in table:\n if i[0]==id_:\n searched_index=count\n in_it=True\n count+=1\n \n if in_it:\n 
to_change=ui.get_inputs(list_labels,\"\")\n to_change.insert(0,common.generate_random(table))\n table[searched_index]=to_change\n\n return table\n \n else:\n ui.print_error_message(\"ID is not found\")", "def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n tree.insert('', 'end', text=counter, values=line)\n counter += 1", "def datasource_untouched():\r\n original_data = copy.deepcopy(MEMORY_DATA)\r\n\r\n table = UnorderedTable(MEMORY_DATA)\r\n table.order_by = 'i'\r\n list(table.rows)\r\n assert MEMORY_DATA == original_data\r\n\r\n table = UnorderedTable(MEMORY_DATA)\r\n table.order_by = 'beta'\r\n list(table.rows)\r\n assert MEMORY_DATA == original_data", "def update(self, table_name, data, where):\n fields = []\n for fname in data:\n fields.append(\"`{0}` = '{1}'\".format(fname, self.escape(data[fname])))\n\n self.q(\"UPDATE `{0}` SET {1} WHERE {2}\".format(table_name, \", \".join(fields), where))", "def update_table_column(column, value):\n return cr.update_customer(column, value)", "def fill_target_table(new_data, curs, conn, overwrite=False):\n for i in new_data:\n connect_database.add_target_to_database(list(i), curs, conn, overwrite_exsiting = overwrite)\n conn.commit()", "def upload_df_to_bq(df, project_id, table_id):\n\n df.to_gbq(table_id, project_id=project_id, if_exists='replace')\n return 'table uploaded to ' + project_id + '.' + table_id", "def to_upsert():\n \n return (out['parameters']['dataset'], out['parameters']['timezone'], \n out['parameters']['rows'], out['parameters']['format'], \n out['parameters']['refine']['ano'], out['parameters']['refine']['mes'], \n out['parameters']['metadata']['fecha_ejecucion'], \n out['parameters']['metadata']['parametros_url'], \n out['parameters']['metadata']['ip_address'], \n out['parameters']['metadata']['usuario'], \n out['parameters']['metadata']['nombre_archivo'], \n out['parameters']['metadata']['ruta'])", "def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def gen_new_table(db_old, db_new, table, col_index, new_col_list, ord_users, ord_subs):\n con = lite.connect(db_old)\n with con:\n cur = con.cursor()\n cur.execute(\"SELECT * FROM \" + table)\n tuple_list = cur.fetchall()\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], new_col_list[i], col_index)\n #anonymize username and submission id\n if(table == \"Comments\"):\n anon_users = anonymize(strip_tuple(tuple_list, 1), ord_users)\n anon_subs = anonymize(strip_tuple(tuple_list, 5), ord_subs)\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], anon_users[i], 1)\n tuple_list[i] = replace_tuple(tuple_list[i], anon_subs[i], 5)\n elif(table == \"Submissions\"):\n for i in range(0, len(new_col_list)):\n tuple_list[i] = replace_tuple(tuple_list[i], i, 0)\n num_bindings = len(tuple_list[0])\n bindings = ('?,' * num_bindings)[:-1]\n con = lite.connect(db_new)\n with con:\n cur = con.cursor()\n cur.executemany(\"INSERT INTO \" + table + \" VALUES\" + \" (\"+ bindings + \")\", tuple_list)", "def update(table, id_):\n\n for i in table:\n if i[0] == id_:\n i[1] = ui.get_inputs([\"What 
should i update the titel to: \"],\"\")\n i[2] = ui.get_inputs([\"What should I update the manufacturer to? \"],\"\")\n i[3] = ui.get_inputs([\"What should I update the year of purchase to? \"],\"\")\n i[4] = ui.get_inputs([\"What should I update the durability time in year/s? \"],\"\")\n data_manager.write_table_to_file(\"inventory/inventory.csv\", table)\n\n return table", "def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n\n hbase = HbaseUtils(self._get_conn(config))\n project = config.project\n table_name = _table_id(project, table)\n\n b = hbase.batch(table_name)\n for entity_key, values, timestamp, created_ts in data:\n row_key = serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n ).hex()\n values_dict = {}\n for feature_name, val in values.items():\n values_dict[\n HbaseConstants.get_col_from_feature(feature_name)\n ] = val.SerializeToString()\n if isinstance(timestamp, datetime):\n values_dict[HbaseConstants.DEFAULT_EVENT_TS] = struct.pack(\n \">L\", int(calendar.timegm(timestamp.timetuple()))\n )\n else:\n values_dict[HbaseConstants.DEFAULT_EVENT_TS] = timestamp\n if created_ts is not None:\n if isinstance(created_ts, datetime):\n values_dict[HbaseConstants.DEFAULT_CREATED_TS] = struct.pack(\n \">L\", int(calendar.timegm(created_ts.timetuple()))\n )\n else:\n values_dict[HbaseConstants.DEFAULT_CREATED_TS] = created_ts\n b.put(row_key, values_dict)\n b.send()", "def update_got_plt_table_data(self, new_data):\n\n self.update_got_plt_table.emit([], True)\n for entry in new_data:\n self.update_got_plt_table.emit(entry, False)", "def update(table, id_):\n\n # your code\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Release date: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table", "def table_edit_callback(data, data_previous):\n # Determine where the change occurred\n diff = diff_dashtable(data, data_previous)\n\n for d in diff:\n r_changed = d['index']\n c_changed = d['column_name']\n print(f\"Caught a change in the table at {r_changed} {c_changed}!\")\n\n # # If the column is empty it won't be in the dict. 
Use .get to handle this with empty string as default\n # data[r_changed][CHANGED_COLUMN] = f\"{data[r_changed].get(CHANGED_COLUMN, '')} {CHANGED_PAD_START}{c_changed}{CHANGED_PAD_END}\"\n return data", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def patch_mbean_table_value(self, mbean):\n for attribute in mbean['attributes']:\n if 'Table' in attribute:\n value = attribute['Table']\n attribute['Table'] = Utils.boolean_to_lowercase_literal(value)\n\n logger.debug('MBean patched result : [%s]', mbean)", "def ChangeTabuleiro(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def table(self, table):\n self._table = table", "def table(self, table):\n self._table = table", "def set_table_key(rows: List[Dict[str, str]], key: str) -> List[Dict[str, str]]:\n new_rows: List[Dict[str, str]] = []\n for row in rows:\n new_row = dict(row)\n new_row[\"_key\"] = new_row[key]\n new_rows.append(new_row)\n\n return new_rows", "def update_DB(self, iterable, entry_columns, update):\n conn = self.conn\n bulk = []\n old_bulk = []\n list_of_id_db = list()\n list_of_id_atuais = self.lista_atual()\n list_of_id_afastados = self.lista_afastados()\n\n if update:\n list_of_id_db = conn.execute('SELECT id_parlamentar FROM {}.{}'.format(self.schema, self.table))\n list_of_id_db = [tup[0] for tup in list_of_id_db]\n id_row_historic = list(conn.execute('SELECT MAX(id) FROM {}.{}_historic'.format(self.schema, self.table)))[0][0]\n if not id_row_historic:\n id_row_historic = 0\n\n for senador in tqdm(iterable):\n entry = self.fill_entry_senador(senador,entry_columns)\n id_parlamentar = entry['id_parlamentar']\n\n if id_parlamentar in list_of_id_atuais:\n entry['situacao_parlamentar'] = 'atual'\n elif id_parlamentar in list_of_id_afastados:\n entry['situacao_parlamentar'] = 'afastado'\n\n if id_parlamentar in list_of_id_db:\n compare_columns = 'id_parlamentar, nome_completo, nome_parlamentar_atual, forma_tratamento, sexo_parlamentar, data_nascimento, data_falecimento, sigla_uf_origem, endereco_origem, nome_cidade_origem, 
codigo_estado_civil, endereco_congresso, fone, fax, website, email, profissao, id_camara, id_senado, cpf, titulo_de_eleitor, descricao_participacao'\n\n old_row = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(compare_columns,self.schema, self.table,id_parlamentar))\n old_row = list(old_row)[0]\n new_row = tuple([entry[column] for column in compare_columns.split(', ')])\n\n if old_row != new_row:\n old_entry = copy.deepcopy(entry_columns)\n\n for key in old_entry.keys():\n old_date = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(key,self.schema, self.table,id_parlamentar))\n old_entry[key] = list(old_date)[0][0]\n old_entry['change_date'] = datetime.datetime.today() #capture of change date\n id_row_historic += 1\n old_entry['id'] = id_row_historic\n\n old_bulk.append(old_entry)\n conn.execute(\"DELETE FROM {}.{} WHERE id_parlamentar='{}'\".format(self.schema, self.table,id_parlamentar))\n\n bulk.append(entry)\n else:\n bulk.append(entry)\n\n if len(bulk) > 0:\n df = pd.DataFrame(bulk)\n df.set_index('id_parlamentar', inplace=True)\n print('Adding {} entries to SQL table {}.{}.'.format(len(df),self.schema, self.table))\n df.to_sql(self.table, con=self.conn, schema=self.schema, if_exists='append')\n\n if len(old_bulk) > 0:\n df2 = pd.DataFrame(old_bulk)\n df2.set_index('id_parlamentar', inplace=True)\n historic_table_name = self.table + '_historic'\n print('Adding {} entries to SQL table {}.{}.'.format(len(df2),self.schema, historic_table_name))\n df2.to_sql(historic_table_name, con=self.conn, schema=self.schema, if_exists='append')", "def before_update(mapper, conn, target):\n if isinstance(target, Column):\n raise TypeError('Got a column instead of a table')\n\n if target.id_ is None:\n dataset_id = ObjectNumber.parse(target.d_id)\n target.id_ = str(TableNumber(dataset_id, target.sequence_id))", "def update(table, id_):\n\n # 4\n for index in range(len(table)):\n if table[index][0] == id_:\n addnew = ui.get_inputs(\n ['name: ', 'birth_year: '],\n 'Updating list of hr')\n addnew.insert(0, id_)\n table[index] = addnew\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def update(table, id_, record):\n\n new_table = []\n for element in table:\n if id_ == element[0]:\n element = record\n new_table.append(element)\n else:\n new_table.append(element)\n table = new_table\n\n return table", "def update_table(table_name, url):\r\n if table_name == \"on_progress_pages\":\r\n sql_query = \"UPDATE on_progress_pages SET is_scrapped=1 WHERE page_url = %s\"\r\n elif table_name == \"on_progress_domains\":\r\n sql_query = \"UPDATE on_progress_domains SET is_scrapped=1 WHERE url = %s\"\r\n else:\r\n print \"unknown ERROR\"\r\n return\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(sql_query, (url,))\r\n db.commit()\r\n except:\r\n cursor.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def put_item(key, value):\n try:\n response = table.put_item( Item={ 'my-key': key, 'some-other-key': value )\n print(f\"Successfully added new item\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to creat new item - key : {key}, value : {value}\")\n print(ce)\n\ndef update_nested_item(key, value):\n 
\"\"\"\n Update a nested item. create \n \"\"\"\n try:\n response = table.update_item( Key={ 'my-key': key },\n UpdateExpression='SET #other-key = :new_value',\n ExpressionAttributeNames={\n '#other-key': 'New-Key'\n },\n ExpressionAttributeValues={ ':new_value': True },\n ReturnValues='ALL_NEW'\n )\n print(\"Successfully created/updated item.\")\n print(f\"Response : {response}\")\n except ClientError as ce:\n print(f\"Failed to update item : {ce}\")", "def on_put(self, req, resp, table, id):\n user = req.context['user']\n pairs = req.context['doc']['values']\n keys = pairs.keys()\n set_clause = [\"`{}`=:{}\".format(k, k) for k in keys]\n set_clause = ','.join(set_clause)\n engine = user_db_engine(user)\n query = \"UPDATE {} SET {} WHERE id=:id\".format(table, set_clause)\n try:\n pairs['id'] = int(id)\n except ValueError:\n raise exceptions.HTTPBadRequestError(\"Invalid ID\")\n\n with engine.new_session() as conn:\n result = conn.execute(query, pairs)\n\n if config.use_cache():\n key = _make_key(engine, table, \"*\", id, -1)\n cache.invalidate_query_pattern(\"{}\".format(key))\n resp.context['result'] = {'result': 'ok'}\n resp.status = falcon.HTTP_200", "def transform_table_data(tableRows: list, table: bigquery.Table):\n colSchema: list = table.schema\n assert len(tableRows[0]) <= len(colSchema), f'table should have at most as many columns as its schema: {len(tableRows[0])} ! <= {len(colSchema)}'\n formatter = []\n for schemaField in colSchema:\n fn = None\n if schemaField.field_type in ('INT64', 'INTEGER'):\n fn = get_as_int\n elif schemaField.field_type == ('FLOAT64', 'FLOAT'):\n fn = float\n elif schemaField.field_type != 'STRING': print(schemaField.field_type)\n formatter.append(fn)\n\n for row in tableRows:\n for (idx, val) in enumerate(row):\n fn = formatter[idx]\n if fn is not None:\n result = fn(val)\n row[idx] = result if result is not None else 0\n return", "def update_item(self, table, item):", "def update_result(result):\n invader = result['invader'] \n countryOfInvader = result['countryOfInvader'] \n defender = result['defender'] \n countryOfdefender = result['countryOfdefender'] \n invasionHistory = InvasionHistory(\n invader ,\n countryOfInvader,\n defender,\n countryOfdefender,\n datetime.datetime.now() )\n\n #update territory info\n t_record = Territory.get_by_key_name(countryOfdefender)\n t_record.occupier = invader\n t_record.put() \n\n db.put([invasionHistory])", "def callback_tablechanged(table_data):\n return {\n \"data\": [\n {\"x\": [row[\"x0\"], row[\"x1\"]], \"y\": [row[\"y0\"], row[\"y1\"]], \"type\": \"line\"}\n for row in table_data\n ]\n }", "def generate_table(self, rows):\n ...", "def update_nypd_complaint_results():\n start_datetime, end_datetime = get_start_end_datetimes()\n\n cols_to_keep = \"boro_nm,cmplnt_fr_dt,cmplnt_to_dt,juris_desc,law_cat_cd,ofns_desc,prem_typ_desc,longitude,latitude\"\n results = SOCRATA_CLIENT.get(\n NYPD_COMPLAINT_ID, \n limit=10, \n select=cols_to_keep,\n where=f\"cmplnt_to_dt IS NULL and cmplnt_fr_dt between '{start_datetime.isoformat()}' and '{end_datetime.isoformat()}'\")\n\n df = pd.DataFrame.from_records(results)\n cols = list(df)\n for c in cols:\n df[c].fillna(\"null\", inplace=True)\n __upload_to_gcp_bucket(df, NYPD_COMPLAINT_FNAME)\n return df", "def do_replace(self, tx, item):\n sql = \"\"\"REPLACE INTO des_raw_parsed_data (id, content, url, source, exception_detail, exception_code, site, template_id, domain, classify, subclass)\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n content = 
json.dumps(dict(item[\"other_parameter\"]),ensure_ascii=False)\n # if content:\n # content = content.decode('unicode_escape')\n args = (\n item[\"uuid\"],\n content,\n item[\"url\"],\n item[\"source\"],\n json.dumps(item[\"exception\"],ensure_ascii=False),\n item[\"exception_code\"],\n item[\"site\"],\n item[\"template_id\"],\n item[\"domain\"],\n item[\"classify\"],\n item[\"subclass\"]\n )\n\n tx.execute(sql, args)", "def updated_full_record(full_record):\n full_record[\"access\"][\"status\"] = \"embargoed\"\n full_record[\"created\"] = \"2023-03-23T00:00:00.000000+00:00\"\n full_record[\"id\"] = \"abcde-fghij\"\n full_record[\"metadata\"][\"resource_type\"][\"id\"] = \"other\"\n\n return full_record", "def update_data():\n pass", "def write_to_database(database, table, data):\r\n in_tests.test_write_to_database_from_dict(database, table, data)\r\n print (f\" Insert or update data in `{database} > {table}`...\")\r\n\r\n connection = sqlite3.connect(database)\r\n cursor = connection.cursor()\r\n counter = 1\r\n query_columns = \", \".join(data.keys())\r\n query_values = f\"{'?, ' * len(data)}\"[:-2]\r\n query = \\\r\nf\"INSERT OR REPLACE INTO {table} ({query_columns}) VALUES ({query_values});\"\r\n cursor.execute(query, list(data.values()))\r\n connection.commit()\r\n database_changes = connection.total_changes\r\n cursor.close()\r\n connection.close()\r\n out_tests.test_write_to_database(database_changes, counter)\r\n return (database_changes)", "def clean(self, table_name, column, value=None):\n table = self._create_table(table_name)\n row = {column: value}\n statement = table.update().values(**row)\n return self._try_to_execute_statement(statement)", "def array_update(self, table_list):\r\n for tbl in table_list:\r\n x = kit.SQL_pull('name, subject_id', tbl)\r\n r = {i[0]: i[1] for i in x}\r\n h = {i[1]: tbl for i in x}\r\n \r\n self.refference.update(r)\r\n self.home_table.update(h)\r\n \r\n self.counts[tbl] = len(x)", "def update_data(self, update_key, update_values):\n engine = sqlalchemy.create_engine(self.db_uri)\n\n final_where_cond = create_cond_string(update_key, flag='where')\n read_sql = f\"SELECT * FROM {self.database_params['database']}.{self.database_params['table']} WHERE {final_where_cond};\"\n\n update_df = pd.read_sql_query(sql=read_sql, con=engine)\n\n if update_df.shape[0] == 0:\n raise Exception(\"No data in the database for given key(s)\")\n else:\n update_df.drop(columns=['id'], inplace=True)\n\n for key in update_values:\n update_df[key] = update_values[key]\n\n # New dataframe for the updated data\n update_df = create_hash_id(update_df)\n update_values['id'] = update_df['id'].iloc[0]\n\n # Create update query with the update data\n final_set_cond = create_cond_string(update_values, flag='set')\n update_sql = f\"UPDATE {self.database_params['database']}.{self.database_params['table']} SET {final_set_cond} WHERE {final_where_cond};\"\n\n # Update the database\n engine.execute(update_sql)", "def patch(self, table_number):\n table = TableDetails.query.get_or_404(table_number)\n\n if 'table_size' in request.json:\n table.table_size = request.json['table_size']\n if 'table_status' in request.json:\n table.table_status = request.json['table_status']\n db.session.commit()\n return table, 200", "def refresh( self ):\n\n def get_bucket( line_spec,value ):\n if not self.has_column(value.column_name):\n self.add_column(Column(name=value.column_name))\n bc = self.get_column(value.column_name)\n for idx in range(bc.size()):\n if bc.get(idx).get_value() >= value.get_value():\n 
break\n else:\n idx = bc.size()\n if idx < bc.size():\n if line_spec[\"bucket_type\"] == string_type:\n if bc.get(idx).get_value() != value.get_value():\n bc.ins(idx,Cell(line_spec[\"bucket_type\"],value.get_value(),format_map[line_spec[\"bucket_type\"]]))\n return idx\n elif idx == 0 and bc.size() > 0:\n diff = bc.get(idx).get_value() - value.get_value()\n if line_spec[\"bucket_type\"] == date_type:\n while diff > timedelta(minutes=line_spec[\"bucket_size\"]):\n new_bucket = bc.get(idx).get_value() - timedelta(minutes=line_spec[\"bucket_size\"])\n bc.ins(idx,Cell(line_spec[\"bucket_type\"],new_bucket,format_map[line_spec[\"bucket_type\"]]))\n diff = bc.get(idx).get_value() - value.get_value()\n return idx\n elif line_spec[\"bucket_type\"] == string_type:\n bc.ins(idx,Cell(line_spec[\"bucket_type\"],value.get_value(),format_map[line_spec[\"bucket_type\"]]))\n return idx\n else:\n while diff > line_spec[\"bucket_size\"]:\n new_bucket = bc.get(idx).get_value() - line_spec[\"bucket_size\"]\n bc.ins(idx,Cell(line_spec[\"bucket_type\"],new_bucket,format_map[line_spec[\"bucket_type\"]]))\n diff = bc.get(idx).get_value() - value.get_value()\n return idx\n elif idx == bc.size():\n if line_spec[\"bucket_type\"] == string_type:\n bc.put(idx,Cell(line_spec[\"bucket_type\"],value.get_value(),format_map[line_spec[\"bucket_type\"]]))\n return idx\n else:\n while True:\n if idx > 0:\n prev_bucket = bc.get(idx-1).get_value()\n else:\n prev_bucket = value.get_value()\n\n if line_spec[\"bucket_type\"] == date_type:\n new_bucket = prev_bucket + timedelta(minutes=line_spec[\"bucket_size\"])\n else:\n new_bucket = prev_bucket + line_spec[\"bucket_size\"]\n\n bc.put(idx,Cell(line_spec[\"bucket_type\"],new_bucket,format_map[line_spec[\"bucket_type\"]]))\n if value.get_value() < new_bucket:\n return idx\n idx = bc.size()\n\n def put_value( value, bidx ):\n if not self.has_column(value.column_name):\n self.add_column(Column(name=value.column_name))\n cc = self.get_column(value.column_name)\n if bidx < cc.size():\n c = cc.get(bidx)\n if c.type == blank_type:\n cc.put(bidx,value.to_cell())\n else:\n cc.get(bidx).put_value(value.get_value())\n else:\n cc.put(bidx,value.to_cell())\n\n def prune_buckets( line_spec ):\n for group,column_name,type,action in line_spec[\"column_map\"]:\n if self.has_column(column_name):\n cc = self.get_column(column_name)\n while cc.size() > line_spec[\"num_buckets\"]:\n cc.delete(0)\n\n def top_buckets( line_spec ):\n columns = []\n key_idx = None\n idx = 0\n for group,column_name,type,action in line_spec[\"column_map\"]:\n columns.append(self.get_column(column_name))\n if action == \"key\":\n key_idx = idx\n idx += 1\n\n sort_rows = []\n for idx in range(columns[key_idx].size()):\n values = []\n for cidx in range(len(columns)):\n if cidx != key_idx:\n values.append(columns[cidx].get(idx).get_value())\n values.append(idx)\n sort_rows.append(values)\n\n sort_rows.sort(reverse=True)\n new_columns = []\n for group,column_name,type,action in line_spec[\"column_map\"]:\n new_columns.append(Column(name=column_name))\n\n for ridx in range(min(len(sort_rows),line_spec[\"num_buckets\"])):\n for cidx in range(len(columns)):\n new_columns[cidx].put(sort_rows[ridx][-1],columns[cidx].get(sort_rows[ridx][-1]))\n\n for c in new_columns:\n self.replace_column(self.map_column(c.get_name()),c)\n\n lb_days,lb_hours,lb_minutes = self.log_lookback\n start_time = datetime.now() - timedelta(days=lb_days,hours=lb_hours,minutes=lb_minutes)\n\n log_files = glob.glob(self.log_glob)\n\n for lf in log_files:\n lfp = 
0\n stat = os.stat(lf)\n if stat.st_mtime < start_time.timestamp():\n continue\n\n if lf in self.file_map:\n lft,lfp = self.file_map[lf]\n if stat.st_mtime <= lft:\n continue\n\n if lf.endswith(\".gz\"):\n lf_f = gzip.open(lf,\"rt\",encoding=\"utf-8\")\n else:\n lf_f = open(lf,\"r\",encoding=\"utf-8\")\n\n lf_f.seek(lfp,0)\n\n for line in lf_f:\n line = line.strip()\n for line_spec in self.log_map:\n m = re.match(line_spec[\"line_regex\"],line)\n if m:\n values = []\n key_idx = None\n for group,column_name,type,action in line_spec[\"column_map\"]:\n values.append(Value( column_name, type, action, m.group(group) ))\n if action == \"key\":\n key_idx = len(values)-1\n bidx = get_bucket(line_spec,values[key_idx])\n for v in values:\n if v.action != \"key\":\n put_value( v, bidx )\n if values[key_idx].type != string_type:\n prune_buckets(line_spec)\n\n self.file_map[lf] = (stat.st_mtime,lf_f.tell())\n\n for line_spec in self.log_map:\n key_idx = None\n idx = 0\n for group,column_name,type,action in line_spec[\"column_map\"]:\n if action == \"key\":\n key_idx = idx\n break\n idx += 1\n\n kg,kn,kt,ka = line_spec[\"column_map\"][key_idx]\n kc = self.get_column(kn)\n for idx in range(kc.size()):\n for fg,fn,ft,fa in line_spec[\"column_map\"]:\n if fn != kn:\n fc = self.get_column(fn)\n cc = fc.get(idx)\n if cc.type == blank_type:\n fc.put(idx,ActionCell(ft,None,format_map[ft],fa))\n\n if kt == string_type:\n top_buckets( line_spec )\n\n self.changed()\n\n DataTable.refresh(self)" ]
[ "0.6006506", "0.60053116", "0.5916676", "0.58872443", "0.58701134", "0.5751219", "0.5727902", "0.57181513", "0.5715651", "0.56847066", "0.5664049", "0.56344426", "0.5623332", "0.55554026", "0.5548725", "0.54871124", "0.5472866", "0.5461745", "0.54568017", "0.54271203", "0.54252964", "0.5409129", "0.53968215", "0.5366817", "0.53643525", "0.5350525", "0.53456247", "0.5337937", "0.5318152", "0.5317226", "0.5306835", "0.5306459", "0.53027534", "0.5285692", "0.5246677", "0.5242569", "0.522844", "0.5216071", "0.5215392", "0.5214289", "0.5209298", "0.5194183", "0.51832277", "0.51832277", "0.51832277", "0.5179415", "0.51695687", "0.5166141", "0.51645154", "0.5164141", "0.5163119", "0.51516896", "0.514859", "0.51480293", "0.51423025", "0.51265496", "0.51264554", "0.51232326", "0.51135737", "0.5091135", "0.5088558", "0.50838554", "0.508192", "0.50818324", "0.5074258", "0.5065309", "0.50639546", "0.5059588", "0.50500476", "0.5048304", "0.50450736", "0.5042039", "0.50413597", "0.5039849", "0.50318927", "0.50318927", "0.50300074", "0.5028906", "0.5021792", "0.5019468", "0.50187314", "0.5018027", "0.5010671", "0.5007328", "0.49963328", "0.49947187", "0.49918425", "0.49899387", "0.4985298", "0.49791577", "0.49777383", "0.49755248", "0.4974546", "0.497437", "0.49636275", "0.49635497", "0.49511647", "0.494989", "0.49423012", "0.4941267" ]
0.6709368
0
Get the output for a finished job with the given id.
def getOutput(self): self.checkBeforeGet() # Get first job of the list if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, 10*1024): # First check for more than 10 Mb msg = "You have LESS than 10 MB of free space on your working dir\n" msg +="Please make some room before retrying\n\n" msg +="To bypass this check, run \n" msg +="crab -get -USER.dontCheckSpaceLeft=1 \n" raise CrabException(msg) list_first=self.list_id[0:1] task= common.scheduler.getOutput(1, list_first, self.outDir) lastSize = self.organizeOutput( task, list_first ) # here check disk space for first job if not self.dontCheckSpaceLeft and not has_freespace(self.outDir, lastSize*len(self.list_id)*1.2) : # add a 20% overhead msg = "Estimated space needed for getOutput is "+str(lastSize*len(self.list_id)*1.2) msg +=" which is LESS than available space on disk\n" msg +="Please make some room before retrying\n" msg +="To bypass this check, run \n" msg +="crab -get -USER.dontCheckSpaceLeft=1 \n" raise CrabException(msg) # get the size of the actual OSB of first job if (len(self.list_id)>1) : # check disk space for other N jobs using estimate from the first list_other=self.list_id[1:] task= common.scheduler.getOutput(1, list_other, self.outDir) self.organizeOutput( task, list_other ) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def job_output(self, job_id):\n\n url = self.base_url + \"/ml-service/phoenix-ml/output/findBy?jobId={0}\".format(job_id)\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.json()", "def completed_job_info(self, jobid=None, output=False):\n\n self.check_all_jobs()\n\n if jobid is None:\n completed_jobids = [key for key in self.job_dict.keys()\n if self.job_dict[key] == 'COMPLETED']\n response_list = [\n self._request(\n 'GET',\n CosmoSim.QUERY_URL + \"/{}\".format(completed_jobids[i]),\n auth=(self.username, self.password), cache=False)\n for i in range(len(completed_jobids))]\n self.response_dict_current = {}\n for i, vals in enumerate(completed_jobids):\n self.response_dict_current[vals] = (\n self._generate_response_dict(response_list[i]))\n else:\n if self.job_dict[jobid] == 'COMPLETED':\n response_list = [\n self._request(\n 'GET', CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), cache=False)]\n self.response_dict_current = {}\n self.response_dict_current[jobid] = (\n self._generate_response_dict(response_list[0]))\n else:\n warnings.warn(\"JobID must refer to a query with a phase \"\n \"of 'COMPLETED'.\")\n return\n\n if output is True:\n dictkeys = self.response_dict_current.keys()\n if len(dictkeys) > 1:\n keys = [i for i in self.response_dict_current.keys()]\n phases = [self.job_dict[key] for key in keys]\n t = Table()\n t['JobID'] = keys\n t['Phase'] = phases\n t.pprint()\n warnings.warn(\"Use specific jobid to get more information, or \"\n \"explore `self.response_dict_current`.\")\n elif len(dictkeys) == 1:\n print(self.response_dict_current[dictkeys[0]]['content'])\n else:\n log.error('No completed jobs found.')\n return\n else:\n return", "def general_job_info(self, jobid=None, output=False):\n\n self.check_all_jobs()\n\n if jobid is None:\n print(\"Job Summary:\\n\"\n \"There are {0} jobs with phase: COMPLETED.\\n\"\n \"There are {1} jobs with phase: ERROR.\\n\"\n \"There are {2} jobs with phase: ABORTED.\\n\"\n \"There are {3} jobs with phase: PENDING.\\n\"\n \"There are {4} jobs with phase: EXECUTING.\\n\"\n \"There are {5} jobs with phase: QUEUED.\\n\"\n \"Try providing a jobid for the job you'd like to \"\n \"know more about.\\n To see a list of all jobs, use \"\n \"`check_all_jobs()`.\"\n .format(self.job_dict.values().count('COMPLETED'),\n self.job_dict.values().count('ERROR'),\n self.job_dict.values().count('ABORTED'),\n self.job_dict.values().count('PENDING'),\n self.job_dict.values().count('EXECUTING'),\n self.job_dict.values().count('QUEUED')))\n return\n else:\n response_list = [self._request(\n 'GET', CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), cache=False)]\n\n if response_list[0].ok is False:\n log.error('Must provide a valid jobid.')\n return\n else:\n self.response_dict_current = {}\n self.response_dict_current[jobid] = (\n self._generate_response_dict(response_list[0]))\n\n if output is True:\n dictkeys = self.response_dict_current.keys()\n print(self.response_dict_current[dictkeys[0]]['content'])\n return\n else:\n return", "def get_job_output(job_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobResult]:\n ...", "def view_result(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n if job.successful():\n result = job.result\n return jsonify({'job_id': 
job_id, 'result': job.result})\n else:\n result = 'job was not finished or was not successful'\n return jsonify({'job_id': job_id, 'result': result})", "def info(self, jobid):\n return self.rpc.call(MsfRpcMethod.JobInfo, [jobid])", "def get_task_output(self, task, output_id):\n return self._gdb_interface.get_task_output(task, output_id)", "def job_by_id(self, job_id):\n response = self._session.get(\n path='{base_api}/jobs/{job_id}.xml'.format(\n base_api=self.base_api,\n job_id=job_id\n ),\n headers={'Accept': 'application/xml'},\n )\n\n return response.text", "def get_a_job(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n job_message = job_schema.dump(job, many=True)\n return custom_response(job_message, 200)", "def poll(self, jobid):\n out = subprocess.check_output([\"bjobs\", \"-o\", \"stat\", \"-noheader\",\n str(jobid)])\n if out:\n return out[:-1]\n return \"UNKNOWN\"", "async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())", "def get_task_output(self, task, output_id):\n output_record = self._read_transaction(tx.get_task_output, task=task, output_id=output_id)\n return _reconstruct_task_output(output_record[\"o\"])", "def qstat_id(job_id):\n\n output_lines = call_qstat([str(job_id)])\n if len(output_lines) != 3:\n raise PBSUtilQStatError('Bad qstat id output:\\n\"%s\"' % '\\n'.join(output_lines))\n\n job_statuses = parse_qstat_plain_output(output_lines)\n\n assert len(job_statuses) == 1, \"qstat id did not return the expected number of job statuses: %s != 1\" % len(job_statuses)\n\n job_stat = job_statuses[0]\n assert job_stat.id == job_id, \"qstat job_id did no match expected job_id. 
%s != %s\" % (job_stat.id, job_id)\n\n return job_stat", "def get_async_job_result(self, account_id, job_id, batch=False):\n path = 'act_%s/reportstats' % account_id\n args = {\n 'report_run_id': job_id\n }\n return self.make_request(path, 'GET', args=args, batch=batch)", "def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)", "def output(self) -> pulumi.Output[Optional['outputs.JobStepOutputResponse']]:\n return pulumi.get(self, \"output\")", "def get_job_progress(self, job_id):\n\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/job/' + job_id + '/status'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug('response = %s', response)\n return response", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n if state == states.STARTED:\n return { 'id':result_task.task_id, 'status': state }, 200\n # task still pending or unknown\n elif state == states.PENDING:\n return { 'id':result_task.task_id, 'status': state }, 200\n elif state == states.SUCCESS:\n return { 'id':result_task.task_id, 'status': state }, 303, {'Location': api.url_for(MathJobResult,id=result_task.task_id)}\n else:\n return error(result_task)", "def get_job_information(run_id):\n cmd = [github_cli, 'run', 'view', str(run_id), '--json', 'jobs']\n with subprocess.Popen(cmd, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n return json.loads(result)['jobs']", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def task_stdout(self, task_id):\n result, _ = self.task_collect(task_id, wait=False)\n return result['shards'][0]['output']", "def retrieve_job(self, job_id):\n job = {}\n with self._lock:\n if job_id not in self._jobs:\n return None\n job = self._jobs[job_id]\n return job", "def get_results(self, job_id):\n ujs = self.__ujs_client()\n res = ujs.get_results(job_id)\n return res", "def generateFinishOutput(self, job):\n return []", "def download_command_output(self, command_id):\n return self._get(endpoint='/command/{}/download'.format(command_id), endpoint_suffix='/cmf').json()", "def check_job_status(self, jobid=None):\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n else:\n jobid = self.current_job\n\n response = self._request(\n 'GET', CosmoSim.QUERY_URL + '/{}'.format(jobid) + '/phase',\n auth=(self.username, self.password), data={'print': 'b'},\n cache=False)\n\n log.info(\"Job {}: {}\".format(jobid, response.content))\n return response.content", "def GetResult(jobid, g_params): # {{{\n # retrieving result from the remote server for this job\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n\n webcom.loginfo(f\"GetResult for {jobid}.\\n\", gen_logfile)\n\n path_static = g_params['path_static']\n path_result = os.path.join(path_static, 'result')\n path_cache = g_params['path_cache']\n finished_date_db = g_params['finished_date_db']\n name_server = g_params['name_server']\n\n rstdir = os.path.join(path_result, jobid)\n runjob_logfile = os.path.join(rstdir, \"runjob.log\")\n runjob_errfile = os.path.join(rstdir, \"runjob.err\")\n outpath_result = os.path.join(rstdir, jobid)\n if not os.path.exists(outpath_result):\n os.mkdir(outpath_result)\n\n remotequeue_idx_file = os.path.join(rstdir, \"remotequeue_seqindex.txt\")\n\n torun_idx_file = os.path.join(rstdir, \"torun_seqindex.txt\")\n 
finished_idx_file = os.path.join(rstdir, \"finished_seqindex.txt\")\n query_parafile = os.path.join(rstdir, \"query.para.txt\")\n\n query_para = {}\n if os.path.exists(query_parafile):\n content = myfunc.ReadFile(query_parafile)\n if content != \"\":\n try:\n query_para = json.loads(content)\n except ValueError:\n query_para = {}\n failed_idx_file = os.path.join(rstdir, \"failed_seqindex.txt\")\n\n starttagfile = os.path.join(rstdir, \"runjob.start\")\n cnttry_idx_file = os.path.join(rstdir, \"cntsubmittry_seqindex.txt\") # index file to keep log of tries\n tmpdir = os.path.join(rstdir, \"tmpdir\")\n finished_seq_file = os.path.join(outpath_result, \"finished_seqs.txt\")\n\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n\n finished_info_list = [] # [info for finished record]\n finished_idx_list = [] # [origIndex]\n failed_idx_list = [] # [origIndex]\n resubmit_idx_list = [] # [origIndex]\n keep_queueline_list = [] # [line] still in queue\n\n cntTryDict = {}\n if os.path.exists(cnttry_idx_file):\n with open(cnttry_idx_file, 'r') as fpin:\n try:\n cntTryDict = json.load(fpin)\n except Exception:\n cntTryDict = {}\n\n # in case of missing queries, if remotequeue_idx_file is empty but the job\n # is still not finished, force recreating torun_idx_file\n if 'DEBUG' in g_params and g_params['DEBUG']:\n try:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file=%s, size(remotequeue_idx_file)=%d, content=\\\"%s\\\"\\n\" %(jobid, remotequeue_idx_file, os.path.getsize(remotequeue_idx_file), myfunc.ReadFile(remotequeue_idx_file)), gen_logfile)\n except Exception:\n pass\n if ((not os.path.exists(remotequeue_idx_file) or # {{{\n os.path.getsize(remotequeue_idx_file) < 1)):\n idlist1 = []\n idlist2 = []\n if os.path.exists(finished_idx_file):\n idlist1 = myfunc.ReadIDList(finished_idx_file)\n if os.path.exists(failed_idx_file):\n idlist2 = myfunc.ReadIDList(failed_idx_file)\n\n completed_idx_set = set(idlist1 + idlist2)\n\n jobinfofile = os.path.join(rstdir, \"jobinfo\")\n jobinfo = myfunc.ReadFile(jobinfofile).strip()\n jobinfolist = jobinfo.split(\"\\t\")\n if len(jobinfolist) >= 8:\n numseq = int(jobinfolist[3])\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(completed_idx_set)=%d+%d=%d, numseq=%d\\n\"%(len(idlist1), len(idlist2), len(completed_idx_set), numseq), gen_logfile)\n\n if len(completed_idx_set) < numseq:\n all_idx_list = [str(x) for x in range(numseq)]\n torun_idx_str_list = list(set(all_idx_list)-completed_idx_set)\n for idx in torun_idx_str_list:\n try:\n cntTryDict[int(idx)] += 1\n except (ValueError, IndexError, KeyError):\n cntTryDict[int(idx)] = 1\n myfunc.WriteFile(\"\\n\".join(torun_idx_str_list)+\"\\n\", torun_idx_file, \"w\", True)\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"recreate torun_idx_file: jobid = %s, numseq=%d, len(completed_idx_set)=%d, len(torun_idx_str_list)=%d\\n\"%(jobid, numseq, len(completed_idx_set), len(torun_idx_str_list)), gen_logfile)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n else:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file %s is not empty\\n\" %(jobid, remotequeue_idx_file), gen_logfile)\n# }}}\n\n text = \"\"\n if os.path.exists(remotequeue_idx_file):\n text = myfunc.ReadFile(remotequeue_idx_file)\n if text == \"\":\n return 1\n lines = text.split(\"\\n\")\n\n nodeSet = set([])\n for i in range(len(lines)):\n line = lines[i]\n if not line or line[0] == \"#\":\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n 
continue\n node = strs[1]\n nodeSet.add(node)\n\n myclientDict = {}\n for node in nodeSet:\n wsdl_url = f\"http://{node}/pred/api_submitseq/?wsdl\"\n try:\n myclient = Client(wsdl_url, cache=None, timeout=30)\n myclientDict[node] = myclient\n except Exception as e:\n webcom.loginfo(f\"Failed to access {wsdl_url} with errmsg {e}\", gen_logfile)\n pass\n\n for i in range(len(lines)): # {{{\n line = lines[i]\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n myfunc.WriteFile(f\"Process {line}\\n\", gen_logfile, \"a\", True)\n if not line or line[0] == \"#\":\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: line empty or line[0] = '#', ignore\", gen_logfile)\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(strs)=%d (!=6), ignore\\n\"%(len(strs)), gen_logfile)\n continue\n origIndex = int(strs[0])\n node = strs[1]\n remote_jobid = strs[2]\n description = strs[3]\n seq = strs[4]\n submit_time_epoch = float(strs[5])\n subfoldername_this_seq = f\"seq_{origIndex}\"\n outpath_this_seq = os.path.join(outpath_result, subfoldername_this_seq)\n\n try:\n myclient = myclientDict[node]\n except KeyError:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: node (%s) not found in myclientDict, ignore\"%(node), gen_logfile)\n keep_queueline_list.append(line)\n continue\n try:\n rtValue = myclient.service.checkjob(remote_jobid)\n except Exception as e:\n msg = \"checkjob(%s) at node %s failed with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue = []\n pass\n isSuccess = False\n isFinish_remote = False\n status = \"\"\n if len(rtValue) >= 1:\n ss2 = rtValue[0]\n if len(ss2) >= 3:\n status = ss2[0]\n result_url = ss2[1]\n errinfo = ss2[2]\n\n if errinfo and errinfo.find(\"does not exist\") != -1:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n msg = \"Failed for remote_jobid %s with errmsg %s\"%(remote_jobid, str(errinfo))\n webcom.loginfo(msg, gen_logfile)\n\n isFinish_remote = True\n\n if status == \"Finished\": # {{{\n isFinish_remote = True\n outfile_zip = f\"{tmpdir}/{remote_jobid}.zip\"\n isRetrieveSuccess = False\n myfunc.WriteFile(\"\\tFetching result for %s/seq_%d from %s \" % (\n jobid, origIndex, result_url), gen_logfile, \"a\", True)\n if myfunc.IsURLExist(result_url, timeout=5):\n try:\n myfunc.urlretrieve(result_url, outfile_zip, timeout=10)\n isRetrieveSuccess = True\n myfunc.WriteFile(f\" succeeded on node {node}\\n\", gen_logfile, \"a\", True)\n except Exception as e:\n myfunc.WriteFile(\" failed with %s\\n\"%(str(e)), gen_logfile, \"a\", True)\n pass\n if os.path.exists(outfile_zip) and isRetrieveSuccess:\n cmd = [\"unzip\", outfile_zip, \"-d\", tmpdir]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n rst_fetched = os.path.join(tmpdir, remote_jobid)\n if name_server.lower() == \"pconsc3\":\n rst_this_seq = rst_fetched\n elif name_server.lower() == \"boctopus2\":\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\", \"seq_0\")\n rst_this_seq_parent = os.path.join(rst_fetched, \"seq_0\")\n else:\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\")\n\n if os.path.islink(outpath_this_seq):\n os.unlink(outpath_this_seq)\n elif os.path.exists(outpath_this_seq):\n shutil.rmtree(outpath_this_seq)\n\n if os.path.exists(rst_this_seq) and not os.path.exists(outpath_this_seq):\n cmd = [\"mv\", \"-f\", rst_this_seq, outpath_this_seq]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n if name_server.lower() == \"boctopus2\":\n # move 
also seq.fa and time.txt for boctopus2\n file1 = os.path.join(rst_this_seq_parent, \"seq.fa\")\n file2 = os.path.join(rst_this_seq_parent, \"time.txt\")\n for f in [file1, file2]:\n if os.path.exists(f):\n try:\n shutil.move(f, outpath_this_seq)\n except:\n pass\n\n fafile_this_seq = os.path.join(outpath_this_seq, \"seq.fa\")\n if webcom.IsCheckPredictionPassed(outpath_this_seq, name_server):\n # relpace the seq.fa with original description\n myfunc.WriteFile('>%s\\n%s\\n'%(description, seq), fafile_this_seq, 'w', True)\n isSuccess = True\n\n if isSuccess:\n # delete the data on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n msg = \"Failed to deletejob(%s) on node %s with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue2 = []\n pass\n\n logmsg = \"\"\n if len(rtValue2) >= 1:\n ss2 = rtValue2[0]\n if len(ss2) >= 2:\n status = ss2[0]\n errmsg = ss2[1]\n if status == \"Succeeded\":\n logmsg = \"Successfully deleted data on %s \"\\\n \"for %s\"%(node, remote_jobid)\n else:\n logmsg = \"Failed to delete data on %s for \"\\\n \"%s\\nError message:\\n%s\\n\"%(node, remote_jobid, errmsg)\n else:\n logmsg = \"Failed to call deletejob %s via WSDL on %s\\n\"%(remote_jobid, node)\n\n # delete the downloaded temporary zip file and\n # extracted file\n if os.path.exists(outfile_zip):\n os.remove(outfile_zip)\n if os.path.exists(rst_fetched):\n shutil.rmtree(rst_fetched)\n\n # create or update the md5 cache\n if name_server.lower() == \"prodres\" and query_para != {}:\n md5_key = hashlib.md5((seq+str(query_para)).encode('utf-8')).hexdigest()\n else:\n md5_key = hashlib.md5(seq.encode('utf-8')).hexdigest()\n subfoldername = md5_key[:2]\n md5_subfolder = \"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n\n # copy the zipped folder to the cache path\n origpath = os.getcwd()\n os.chdir(outpath_result)\n shutil.copytree(\"seq_%d\"%(origIndex), md5_key)\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)\n if not os.path.exists(md5_subfolder):\n os.makedirs(md5_subfolder)\n shutil.move(\"%s.zip\"%(md5_key), \"%s.zip\"%(cachedir))\n shutil.rmtree(md5_key) # delete the temp folder named as md5 hash\n os.chdir(origpath)\n\n # Add the finished date to the database\n date_str = time.strftime(g_params['FORMAT_DATETIME'])\n MAX_TRY_INSERT_DB = 3\n cnttry = 0\n while cnttry < MAX_TRY_INSERT_DB:\n t_rv = webcom.InsertFinishDateToDB(date_str, md5_key, seq, finished_date_db)\n if t_rv == 0:\n break\n cnttry += 1\n time.sleep(random.random()/1.0)\n\n# }}}\n elif status in [\"Failed\", \"None\"]:\n # the job is failed for this sequence, try to resubmit\n isFinish_remote = True\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s, status = %s\\n\"%(remote_jobid, status), gen_logfile)\n\n if status != \"Wait\" and not os.path.exists(starttagfile):\n webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)\n\n if isSuccess: # {{{\n time_now = time.time()\n runtime1 = time_now - submit_time_epoch # in seconds\n timefile = os.path.join(outpath_this_seq, \"time.txt\")\n runtime = webcom.ReadRuntimeFromFile(timefile, default_runtime=runtime1)\n info_finish = webcom.GetInfoFinish(\n name_server, outpath_this_seq,\n origIndex, len(seq), description,\n source_result=\"newrun\", runtime=runtime)\n finished_info_list.append(\"\\t\".join(info_finish))\n 
finished_idx_list.append(str(origIndex))\n # }}}\n\n # if the job is finished on the remote but the prediction is failed,\n # try resubmit a few times and if all failed, add the origIndex to the\n # failed_idx_file\n if isFinish_remote and not isSuccess:\n cnttry = 1\n try:\n cnttry = cntTryDict[int(origIndex)]\n except KeyError:\n cnttry = 1\n if cnttry < g_params['MAX_RESUBMIT']:\n resubmit_idx_list.append(str(origIndex))\n cntTryDict[int(origIndex)] = cnttry+1\n else:\n failed_idx_list.append(str(origIndex))\n\n if not isFinish_remote:\n time_in_remote_queue = time.time() - submit_time_epoch\n # for jobs queued in the remote queue more than one day (but not\n # running) delete it and try to resubmit it. This solved the\n # problem of dead jobs in the remote server due to server\n # rebooting)\n if (\n status != \"Running\"\n and status != \"\"\n and time_in_remote_queue > g_params['MAX_TIME_IN_REMOTE_QUEUE']):\n # delete the remote job on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n webcom.loginfo(\"Failed to run myclient.service.deletejob(%s) on node %s with msg %s\"%(remote_jobid, node, str(e)), gen_logfile)\n rtValue2 = []\n pass\n else:\n keep_queueline_list.append(line)\n# }}}\n # Finally, write log files\n finished_idx_list = list(set(finished_idx_list))\n failed_idx_list = list(set(failed_idx_list))\n resubmit_idx_list = list(set(resubmit_idx_list))\n\n if len(finished_info_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_info_list)+\"\\n\", finished_seq_file,\n \"a\", True)\n if len(finished_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_idx_list)+\"\\n\", finished_idx_file,\n \"a\", True)\n if len(failed_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(failed_idx_list)+\"\\n\", failed_idx_file, \"a\",\n True)\n if len(resubmit_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(resubmit_idx_list)+\"\\n\", torun_idx_file,\n \"a\", True)\n\n if len(keep_queueline_list) > 0:\n keep_queueline_list = list(set(keep_queueline_list))\n myfunc.WriteFile(\"\\n\".join(keep_queueline_list)+\"\\n\",\n remotequeue_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", remotequeue_idx_file, \"w\", True)\n\n with open(cnttry_idx_file, 'w') as fpout:\n json.dump(cntTryDict, fpout)\n\n return 0", "def get(self, job_id):\n try:\n result = jobs.find_job_by_id(job_id)\n\n if result is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n\n # Admin can retrieve any job,\n # while others can only retrieve his own job\n if not current_user.is_admin() and result.account_id != current_user.account_id:\n return get_message_json('用户无法访问他人任务'), HTTPStatus.FORBIDDEN\n\n json_res = result.to_json()\n json_res['message'] = '成功查看任务'\n return json_res, HTTPStatus.OK\n\n except Exception as err:\n return handle_internal_error(str(err))", "def get_job_def_by_id(self, job_id):\n try:\n result = self._session.query(\n JobEntity.id,\n JobEntity.username,\n JobEntity.name,\n JobEntity.workflow_id,\n WorkflowEntity.name,\n JobEntity.output_uri,\n JobEntity.work_uri,\n JobEntity.no_output_hash,\n JobEntity.inputs,\n JobEntity.parameters,\n JobEntity.final_output,\n JobEntity.exec_context,\n JobEntity.exec_method,\n JobEntity.exec_parameters,\n JobEntity.notifications\n ).\\\n filter(JobEntity.id == job_id).\\\n filter(WorkflowEntity.id == JobEntity.workflow_id).\\\n all()\n\n result_dict = [\n {\n 'job_id': row[0],\n 'username': row[1],\n 'name': row[2],\n 'workflow_id': row[3],\n 'workflow_name': row[4],\n 'output_uri': row[5],\n 'work_uri': 
json.loads(row[6]),\n 'no_output_hash': row[7],\n 'inputs': json.loads(row[8]),\n 'parameters': json.loads(row[9]),\n 'final_output': json.loads(row[10]),\n 'execution': {\n 'context': json.loads(row[11]),\n 'method': json.loads(row[12]),\n 'parameters': json.loads(row[13])\n },\n 'notifications': json.loads(row[14])\n } for row in result\n ]\n\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n # should have just one record\n if not result_dict:\n return {}\n\n return result_dict[0]", "def job_display(self, job_id):\n job = self.backend.get_job(job_id)\n process_graph_job = self.backend.job_pg_info(job_id)\n download_dir = self.backend.job_result_download(job_id)\n failed_files = []\n if download_dir:\n for ddir in download_dir:\n info(self.iface, \"Downloaded to {}\".format(ddir))\n result = Result(path=ddir, process_graph=process_graph_job)\n if iface.activeLayer():\n crs_background = iface.activeLayer().crs().authid()\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', crs_background)\n else:\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', 'EPSG:4326')\n\n if job.title:\n title = job.title\n else:\n title = \"NoTitle\"\n\n if not result.display(layer_name=\"{}-{}\".format(title, job.created.strftime(\"%Y-%m-%d_%H-%M-%S\"))):\n failed_files.append(ddir)\n iface.zoomToActiveLayer()\n\n if failed_files:\n warning(self.iface, \"The following result files could not be loaded to layer: {}\"\n .format(str(failed_files).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n self.refresh_jobs()", "def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)", "def download(job_id, filename):\n client = connect()\n result = client.get_job_output(\n vaultName=VAULT_NAME,\n jobId=str(job_id))\n\n with open(filename) as f:\n f.write(result['Body'].read())\n print(\"File successfully downloaded.\")", "def run(self,id=None):\n # loop until the process is running or halted.\n while 1:\n\n my_job_status, my_job = self.find_job_and_job_status()\n\n if not my_job_status:\n time.sleep(5)\n continue\n\n if sum(map(lambda st: int(st==my_job_status), self.return_status)) > 0:\n return (my_job_status, my_job.printOld())\n\n time.sleep(5)\n continue", "def run(self, job_id, **kwargs):\n\n extra_vars = {'job_id': job_id}\n extra_vars.update(kwargs)\n\n self._variable_manager.extra_vars = extra_vars\n # set the custom callback plugin\n self._pbex._tqm._stdout_callback = self._results_callback\n self._pbex.run()\n\n return self._pbex._tqm._stats", "def describe_job(self):\n # GET /jobs/{job_id}\n pass", "def get_job_details(self, job_id):\n try:\n LOG.info('Getting Job Details for job_id %s ', job_id)\n job_details = self.common.get_job_by_id(job_id)\n if job_details:\n LOG.info('Successfully listed Job Details for job_id %s : %s',\n job_id, job_details)\n return job_details\n else:\n errorMsg = 'Failed to find the job with specified job_id: %s'\\\n % job_id\n self.show_error_exit(msg=errorMsg)\n except Exception as e:\n errorMsg = 'Get Job details for job_id %s failed with error %s' \\\n % (job_id, str(e))\n self.show_error_exit(msg=errorMsg)", "def job_full_command(job_id):\n return subprocess.check_output(['at', '-c', job_id], text=True, 
encoding='latin-1')", "def poll(self, job_id):\n return self.manage.poll_job(job_id=job_id)", "def examine_job(self):\n if self.data is None:\n print(\"Could not download job id\", self.job_id)\n return\n self.duration = self.read_data(\"duration\")\n self.between_commands = self.read_data(\"between_commands\")\n\n print(\"---\")\n print(\"test_id: {}\".format(self.job_id))\n print(\"duration:\")\n Job.print_results(self.duration)\n print(\"between_commands:\")\n Job.print_results(self.between_commands)\n print(\"\")", "def get_job(self, id, jobstore=None):\n\n return self._scheduler.get_job(id, jobstore)", "def qoutput(self):\n jobid = self.jobid()\n ou = os.path.join(self.directory, jobid + '.OU')\n if not self.in_queue() and os.path.exists(ou):\n with open(ou) as f:\n return f.read()\n else:\n return \"In queue or no output found.\"", "def job_download(self, job_id):\n\n target = QFileDialog.getExistingDirectory(self, 'Where to save the resulting files?')\n if target:\n paths = self.backend.job_result_download(job_id, target)\n info(self.iface, \"Successfully Downloaded to {}\".format(paths))", "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "def store_job_out(self, job_id: Hashable, value: Any) -> None:\n self.store_job(job_id, key=\"out\", value=value)", "def finish_job(\n self, job_id, error_message=None, error_code=None, error=None, job_output=None\n ):\n\n if not job_id:\n raise ValueError(\"Please provide valid job_id\")\n\n job = self.get_mongo_util().get_job(job_id=job_id)\n self._test_job_permissions(job, job_id, JobPermissions.WRITE)\n self._check_job_is_running(job_id=job_id)\n\n if error_message:\n if error_code is None:\n error_code = ErrorCode.job_crashed.value\n self._finish_job_with_error(\n job_id=job_id,\n error_message=error_message,\n error_code=error_code,\n error=error,\n )\n elif job_output is None:\n if error_code is None:\n error_code = ErrorCode.job_missing_output.value\n msg = \"Missing job output required in order to successfully finish job. 
Something went wrong\"\n self._finish_job_with_error(\n job_id=job_id, error_message=msg, error_code=error_code\n )\n raise ValueError(msg)\n else:\n self._finish_job_with_success(job_id=job_id, job_output=job_output)", "def qdel(job_id):\n ssh = connect_server()\n if isinstance(job_id, JobStatus):\n i,o,e = ssh.exec_command(qdel_c + ' ' + job_id.id)\n else:\n i,o,e = ssh.exec_command(qdel_c + ' ' + job_id)\n\n qdel_output = o.readlines() + e.readlines()\n ssh.close()", "def result_json(self, job_id: str, show_progress: bool = False) -> str:\n self.wait_for_processing(job_id, show_progress)\n response = self._session().get(self._status_url(job_id))\n return response.json()", "def cli(ctx, job_id):\n return ctx.gi.jobs.get_inputs(job_id)", "def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)", "def get(self, job_id):\n\n if job_id:\n status = {\"state\": self.runner_service.status(job_id)}\n else:\n # TODO: Update the correct status for all jobs; the filtering in jobrunner doesn't work here.\n all_status = self.runner_service.status_all()\n status_dict = {}\n for k, v in all_status.iteritems():\n status_dict[k] = {\"state\": v}\n status = status_dict\n\n self.write_json(status)", "def get(self, id):\n\n return self.client.get(\"external-task/{0}\".format(id))", "def request_result(job_id):\n result = _database_operations.get_results(job_id, Session())\n if result is None:\n flask.abort(404)\n else:\n return result", "def get_step_output(self, execution_id, step_name, output_key):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_name(step_executions, step_name)\n if step and step.get('Outputs') and step.get('Outputs').get(output_key):\n return step['Outputs'][output_key][0]", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def _job_id_or_out(out):\n\n stdout = re.sub(\"[^0-9]\", \"\", str(out))\n if not stdout:\n stdout = out\n return stdout", "def test_get_job_by_id(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job/{jobId}'.format(jobId=1),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_job(self, _id):\n data = {\n 'class': 'Job',\n 'id': _id,\n 'attrs': {},\n }\n job = self.db_client.send_request('list', json.dumps(data))\n\n return Job(\n _id=job['id'],\n _type=job['type'],\n task=job['task'],\n command=job['command'],\n input_parameters=job['inputParameters'],\n status=job['status'],\n runner_id=job['runner'],\n )", "def __call__(self, job_id: str):\n job = self.job_queue.get()\n if job.theta is None:\n logits = job.offsets\n else:\n logits = self.lr_model.predict_proba(X=job.X, offsets=job.offsets, custom_theta=job.theta,\n return_logits=True)\n logits_per_coordinate = logits - job.offsets\n inc_count(self)\n return self._inference_results(job.y, logits, job.weights.flatten(), job.ids.flatten(), logits_per_coordinate)", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n # tasks finished so result exists\n if state == states.SUCCESS:\n return { 'id': result_task.task_id, 'status': state, 'result': result_task.get(timeout=1.0)}, 200\n # task still pending or unknown - so result do not exists\n elif state == states.PENDING:\n return { 'id': 
result_task.task_id, 'status': state }, 404\n # task started but result do not exists yet\n elif state == states.STARTED:\n return { 'id': result_task.task_id, 'status': state }, 404\n else:\n return error(result_task)", "def output(self) -> Optional[pulumi.Input['JobStepOutputArgs']]:\n return pulumi.get(self, \"output\")", "def _wait_for_query_finish(self, job_id: str, max_wait: int = 60) -> str:\n url_inputs = {'redash_host': self.redash_host, 'job_id': job_id}\n query_url = REDASH_TRACK_JOB_ENDPOINT.format(**url_inputs)\n\n query_result_id: Optional[str] = None\n max_time = time.time() + max_wait\n\n while time.time() < max_time:\n resp = r.get(query_url, headers=self.headers)\n resp_json = resp.json()\n\n LOGGER.debug('Received response from Redash job %s: %s', job_id, resp_json)\n\n job_info = resp_json['job']\n job_status = RedashApiResponse(job_info['status'])\n\n if job_status == RedashApiResponse.SUCCESS:\n query_result_id = job_info['query_result_id']\n break\n\n elif job_status == RedashApiResponse.FAILURE:\n raise RedashQueryCouldNotCompleteException(job_info['error'])\n time.sleep(.5)\n\n if query_result_id is None:\n raise RedashQueryCouldNotCompleteException('Query execution took too long')\n\n return query_result_id", "def get_status(self, job_id):\n\n result = self.redis.get('job_status:' + str(job_id))\n return pickle.loads(result) if result else None", "def load_job(self, job_id: Hashable) -> dict:\n data = self._redis.json().get(f\"job:{job_id}\", \".\")\n return data", "def get_output(self):\n if self._is_started and self._is_terminated:\n job_f = self._outerr_files.replace('%J', str(self._jobid))\n job_out_f = job_f + '.out'\n job_err_f = job_f + '.err'\n job_out = \"\"\n if self.out is None and self.err is None:\n try:\n with open(job_out_f, 'r') as f:\n job_out = f.read()\n except FileNotFoundError:\n sys.stderr.write(\"File not found : {0}\\n\".format(job_out_f))\n sys.stderr.flush()\n job_err = \"\"\n try:\n with open(job_err_f, 'r') as f:\n job_err = f.read()\n except FileNotFoundError:\n sys.stderr.write(\"File not found : {0}\\n\".format(job_err_f))\n sys.stderr.flush()\n self.out, self.err = job_out, job_err\n return(self.out, self.err)\n else:\n return(None, None)", "def load_out_from_all_jobs(self, search_id: Hashable) -> List[Any]:\n jobs_ids = self.load_all_job_ids(search_id)\n values = []\n for job_id in jobs_ids:\n try:\n value = self._redis.json().get(f\"job:{job_id}\", \".out\")\n except redis.exceptions.ResponseError:\n value = None\n\n if value is not None:\n values.append(value)\n return values", "def outputRetrieved(self, blTaskName, rng):\n return self._genericCommand('outputRetrieved', blTaskName, rng)", "def job(self):\n return self.batch[self.job_id]", "def GetJobStatus(self, job_id):\n return self._SendRequest(HTTP_GET,\n \"/%s/jobs/%s\" % (GANETI_RAPI_VERSION, job_id),\n None, None)", "def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure", "async def get_result(request):\n job_id = request.match_info['job_id']\n r = redis.Redis(\n host=os.environ['REDIS_HOST'],\n port=6379,\n decode_responses=True,\n )\n if not r.exists(job_id):\n return web.HTTPNotFound(text='Results are unavailable.')\n output_id = r.get(job_id)\n filename = output_id + '.json'\n try:\n with open(os.path.join(CACHE_DIR, filename), 'r') as f:\n response = json.load(f)\n except FileNotFoundError:\n # Redis is out-of-sync with file system. 
Remove the offending key.\n r.delete(job_id)\n return web.HTTPNotFound(text='Results are unavailable.')\n return web.json_response(response, dumps=functools.partial(json.dumps, indent=4))", "async def get_task_result(task_id: TaskId):", "def checkjob(self, taskid):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\n\t\ttry:\n\t\t\tp = self.qstatoutput\n\t\texcept:\n\t\t\t#command = [ 'qstat','-j',id ]\n\t\t\tcommand = [ 'qstat','-u',os.getenv(\"USER\"),'-g','d' ]\n\t\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t\tself.qstatoutput = p\n\n\t\tisincluster = False\n\t\ttaskstatus = {}\n\t\tfor line in p[0].split(\"\\n\"):\n\t\t\tif not str(self.jobsid) in line:\n\t\t\t\tcontinue\n\t\t\tparseline = line.split()\n\t\t\tstatus= parseline[4]\n\t\t\ttry:\n\t\t\t\ttask = int(parseline[9])\n\t\t\texcept IndexError:\n\t\t\t\t# Implies it is still waiting\n\t\t\t\ttask = int(parseline[8])\n\t\t\ttaskstatus[task] = status\n\t\t\tisincluster = True\n\n\t\tif not isincluster:\n\t\t\t# Checking if the outputfiles are there\n\t\t\tif not os.path.exists(self.outputfiles[taskid]):\n\t\t\t\tmessage = \"\\033[1;31mclustermanager.checkjob: Something went wrong in the cluster:\\033[1;m\"\n\t\t\t\tmessage += \"The task '\"+str(taskid)+\"' of the job '\"+str(self.jobsid)\n\t\t\t\tmessage += \"' is already finish but there is no output root file '\"\n\t\t\t\tmessage += self.outputfiles[taskid]+\"'\\n\"\n\t\t\t\tmessage += \"Check the cluster outputs file\"\n\t\t\t\traise message\n\n\t\t\t# Gathering the file outputs in order to add\n\t\t\tself.taskstatus[\"Done\"].append(taskid)\n\t\t\treturn self.outputfiles[taskid]\n\n\t\t# Still in cluster\n\t\t#statustaskdict = dict( [ (status,[]) for status in taskstatus.values() ] )\n\t\tfor task,status in taskstatus.iteritems():\n\t\t\tif status == \"r\" or status == \"t\":\n\t\t\t\tself.taskstatus[\"r\"].append(task)\n\t\t\telif status == \"qw\":\n\t\t\t\tself.taskstatus[\"qw\"].append(task)\n\t\t\telse:\n\t\t\t\tself.taskstatus[\"Undefined\"].append(task)", "def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text", "def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"args\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None:\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n try:\n qstat_command = config.Config.PBS_QSTAT + \" -f \" + job_id + \" -Fjson\"\n try:\n get_job_info = subprocess.check_output(shlex.split(qstat_command))\n try:\n sanitize_input = get_job_info.decode(\"utf-8\")\n for match in re.findall(\n '\"project\":(\\d+),', sanitize_input, re.MULTILINE\n ):\n # Clear case where project starts with digits to prevent leading zero errors\n print(\n f'Detected \"project\":{match}, > Will be replaced to prevent int leading zero error'\n )\n sanitize_input = sanitize_input.replace(\n f'\"project\":{match},', f'\"project\":\"{match}\",'\n )\n\n job_info = ast.literal_eval(sanitize_input)\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated. 
Error: \"\n + str(job_info),\n }, 210\n\n job_key = list(job_info[\"Jobs\"].keys())[0]\n return {\"success\": True, \"message\": job_info[\"Jobs\"][job_key]}, 200\n\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve Job ID (job may have terminated and is no longer in the queue)\",\n }, 210\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500", "def get_latest_job_tick(self, job_origin_id):", "def task_result(self, task_id):\n result, _ = self.task_collect(task_id, wait=False, include_output=False)\n return result['shards'][0]", "def _is_job_finished(self, job_id):\n complete, rc, status, result, task = False, 0, None, None, None\n job = self.get_job_by_id(job_id)\n if job:\n status = job['status']\n try:\n result, task = job['result'], job['task']\n except KeyError:\n pass\n if status.lower() == SUCCEEDED:\n complete = True\n elif status.lower() in INCOMPLETE_LIST:\n complete = False\n else:\n rc, complete = -1, True\n return complete, result, rc, status, task", "def get(self, id):\n return read_msg(id)", "def output(d):\n try:\n current_worker().output(d)\n except AttributeError:\n pass", "def get_async_job_status(self, job_id, batch=False):\n path = '%s' % job_id\n return self.make_request(path, 'GET', batch=batch)", "def complete_job(self, command_dict):\n job_uuid = command_dict['job_uuid']\n try:\n job = Job[job_uuid]\n except KeyError as e:\n # Job not found is not worth re-raising\n logger.warn(e)\n logger.warn(\"Job {} missing\".format(job_uuid))\n return\n\n logger.info(\"job {} finished with status of {}\".format(job.uuid,\n job.status))\n # Get the job log from the worker\n logger.info(\"retrieving log for job {}\".format(job.uuid))\n job_data_dir = os.path.join(self.data_dir, job.uuid)\n if(not os.path.exists(job_data_dir)):\n os.mkdir(job_data_dir)\n\n fetch_file_from_url(job.log_url(), job_data_dir)\n\n # Now get the job output data from the worker\n if(job.status == Job.STATUS_PROCESSED):\n\n logger.info(\"retrieving output for job {}\".format(job.uuid))\n fetch_file_from_url(job.download_url(), job_data_dir)\n job.status = Job.STATUS_COMPLETE\n\n job.on_primary = True\n # save job\n Job[job.uuid] = job", "async def log_detail(request, job_id=None, task_name=None, log_id=None):\n jobs = dagobah._serialize().get('jobs', {})\n job = [job for job in jobs if str(job['job_id']) == job_id][0]\n return template('log_detail.html',\n job=job,\n task_name=task_name,\n task=[task for task in job['tasks']\n if task['name'] == task_name][0],\n log_id=log_id)", "def _query_jobOutput(self, job_url):\n print('WaPOR API: _query_jobOutput')\n\n request_url = job_url\n\n ijob = 0\n contiue = True\n wait_time = 0\n if self.print_job:\n print(request_url)\n\n while contiue:\n # requests\n try:\n resq = requests.get(\n request_url)\n resq.raise_for_status()\n except requests.exceptions.HTTPError as err:\n raise Exception(\"WaPOR API Http Error: {e}\".format(e=err))\n except requests.exceptions.ConnectionError as err:\n raise Exception(\"WaPOR API Error Connecting: {e}\".format(e=err))\n except requests.exceptions.Timeout as err:\n raise Exception(\"WaPOR API Timeout Error: {e}\".format(e=err))\n except requests.exceptions.RequestException as err:\n raise Exception(\"WaPOR API OOps: Something Else {e}\".format(e=err))\n else:\n resq_json = resq.json()\n try:\n resp = resq_json['response']\n # print(resp)\n\n if resq_json['message'] == 'OK':\n jobType = resp['type']\n\n if self.print_job:\n 
print('WaPOR API: {i} {t}sec {s}'.format(\n i=ijob, t=wait_time, s=resp['status']))\n\n if resp['status'] == 'COMPLETED':\n contiue = False\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n if jobType == 'CROP RASTER':\n output = resp['output']['downloadUrl']\n elif jobType == 'AREA STATS':\n results = resp['output']\n output = pd.DataFrame(\n results['items'], columns=results['header'])\n else:\n print('WaPOR API ERROR: Invalid jobType {t}'.format(\n t=jobType))\n return output\n elif resp['status'] == 'COMPLETED WITH ERRORS':\n contiue = False\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n print(resp['log'][-1])\n elif resp['status'] == 'WAITING':\n contiue = True\n if wait_time % 60 == 0:\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n time.sleep(TIME_SLEEP_SECOND)\n wait_time += TIME_SLEEP_SECOND\n if wait_time > TIME_REQUEST_AFTER_SECOND:\n contiue = False\n print(resp['log'][-1])\n elif resp['status'] == 'RUNNING':\n contiue = True\n if wait_time % 60 == 0:\n print('WaPOR API: {t}sec {s}'.format(\n t=wait_time, s=resp['status']))\n\n time.sleep(TIME_SLEEP_SECOND)\n wait_time += TIME_SLEEP_SECOND\n if wait_time > TIME_REQUEST_AFTER_SECOND:\n contiue = False\n print(resp['log'][-1])\n else:\n raise Exception('WaPOR API ERROR:'\n ' Unkown status'\n ' \"{s}\".'.format(s=resp['status']))\n else:\n print(resq_json['message'])\n except BaseException:\n print('WaPOR API ERROR: Cannot get {url}'.format(url=request_url))\n\n ijob += 1", "def fetchJob(self):\n \n mpDlg = MultipleValDialog(title='Get Job',\n initialvalues=('','my job1'),\n labels=('ID','Your label',),\n types=('string','string'),\n parent=self.mainwin)\n if mpDlg.result == True:\n jobid = mpDlg.results[0]\n name = mpDlg.results[1]\n else:\n return\n job = PEATSA.WebApp.Data.Job(jobid, self.connection) \n if job != None: \n print 'adding job id %s to list' %job.identification\n self.storeJob(name, job)\n self.updateJobs()\n return", "def DownloadJobResultsAsCsv(api, job_ids, output_file):\n with open(output_file, 'wb') as f:\n writer = csv.writer(f)\n writer.writerow(('job_id', 'change', 'isolate') + histograms_df.COLUMNS)\n num_rows = 0\n for job_id in job_ids:\n job = api.pinpoint.Job(job_id, with_state=True)\n # TODO: Make this also work for jobs that ran on windows platform.\n results_file = posixpath.join(\n job['arguments']['benchmark'], 'perf_results.json')\n print 'Fetching results for %s job %s:' % (job['status'].lower(), job_id)\n for change_id, isolate_hash in job_results.IterTestOutputIsolates(job):\n print '- isolate: %s ...' % isolate_hash\n histograms = api.isolate.RetrieveFile(isolate_hash, results_file)\n for row in histograms_df.IterRows(json.loads(histograms)):\n writer.writerow((job_id, change_id, isolate_hash) + row)\n num_rows += 1\n print 'Wrote data from %d histograms in %s.' 
% (num_rows, output_file)", "def CheckIfJobFinished(jobid, numseq, to_email, g_params): # {{{\n bsname = \"job_final_process\"\n path_result = os.path.join(g_params['path_static'], 'result')\n rstdir = os.path.join(path_result, jobid)\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n g_params['jobid'] = jobid\n g_params['numseq'] = numseq\n g_params['to_email'] = to_email\n jsonfile = os.path.join(rstdir, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n finished_idx_list = []\n failed_idx_list = []\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_result'], g_params['jobid'],\n lockname)\n\n num_processed = len(finished_idx_list)+len(failed_idx_list)\n if num_processed >= numseq: # finished\n if ('THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH' in g_params\n and numseq <= g_params['THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH']):\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile]\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{rstdir}/{bsname},{name_server},{jobid}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n cmdline = f\"python {py_scriptfile} -i {jsonfile}\"\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(rstdir)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(\"Run cmdline: %s\"%(cmdline), gen_logfile)\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"isSubmitSuccess: %s\"%(str(isSubmitSuccess)), gen_logfile)", "def _stream_logs(job_id):\n try:\n print(\"Streaming job logs: \")\n process = subprocess.Popen(\n [\"gcloud\", \"ai-platform\", \"jobs\", \"stream-logs\", job_id],\n stdout=subprocess.PIPE,\n )\n while True:\n output = process.stdout.readline()\n # Break out of the loop when poll returns an exit code.\n if process.poll() is not None:\n break\n if output:\n print(output.decode().replace(\"\\x08\", \"\"))\n except (ValueError, OSError) as err:\n print(\"There was an error streaming the job logs.\")\n raise err", "def process_id(job_id):\n pass # Not implemented yet", "async def request_job_info(self, job_id: str, *args, **kwargs) -> dict:\n # TODO: implement\n raise NotImplementedError('{} function \"request_job_info\" not implemented yet'.format(self.__class__.__name__))", "def stdout_download(request, methodoutput_id):\n try:\n methodoutput = MethodOutput.objects.get(pk=methodoutput_id)\n except Dataset.DoesNotExist:\n raise Http404(\"Method output {} cannot be accessed\".format(methodoutput_id))\n\n return build_download_response(methodoutput.output_log)", "def get_job_status(self, job_id: str) -> TableauJobFinishCode:\n 
return TableauJobFinishCode(int(self.server.jobs.get_by_id(job_id).finish_code))", "def output(self):\n time.sleep(1)\n return luigi.LocalTarget('/tmp/bar/%d' % self.num)", "def delete(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n # tasks finished so result exists\n if state == states.SUCCESS:\n try:\n result_task.forget()\n except Exception as e:\n return error(result_task)\n return { 'id': result_task.task_id, 'desc': 'result for job {} deleted'.format(result_task.task_id) }, 200\n # task still pending or unknown - so result do not exists\n elif state == states.PENDING:\n return { 'id': result_task.task_id, 'status': state }, 404\n # task started but result do not exists yet\n elif state == states.STARTED:\n return { 'id': result_task.task_id, 'status': state }, 404\n else:\n return error(result_task)", "def retrieve_archive(self, archive_id, jobid):\n if jobid is None:\n return self.vault.retrieve_archive(archive_id, sns_topic=None, description='Retrieval job')\n else:\n return self.vault.get_job(jobid)", "async def get_satellite_data_ingestion_job_details(\n self,\n job_id: str,\n **kwargs: Any\n ) -> \"_models.SatelliteDataIngestionJob\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.SatelliteDataIngestionJob\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2021-03-31-preview\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_satellite_data_ingestion_job_details.metadata['url'] # type: ignore\n path_format_arguments = {\n 'Endpoint': self._serialize.url(\"self._config.endpoint\", self._config.endpoint, 'str', skip_quote=True),\n 'jobId': self._serialize.url(\"job_id\", job_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize('SatelliteDataIngestionJob', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_output(self, save=True, delete_file=None, update=True):\n _logme.log(('Getting output, save={}, auto_delete={}, '\n 'delete_file={}').format(\n save, self.auto_delete, delete_file\n ), 'debug')\n if delete_file is None:\n delete_file = self.clean_outputs\n if self.kind == 'script':\n return self.get_stdout(save=save, delete_file=delete_file,\n update=update)\n if self.done and self._got_out:\n _logme.log('Getting output from _out', 'debug')\n return self._out\n if update and not self._updating and not self.done:\n self.update()\n if not self.done:\n _logme.log('Cannot get pickled output before job completes',\n 'warn')\n return None\n _logme.log('Getting output from 
{}'.format(self.poutfile), 'debug')\n if _os.path.isfile(self.poutfile):\n with open(self.poutfile, 'rb') as fin:\n out = _pickle.load(fin)\n if delete_file is True or self.auto_delete is True:\n _logme.log('Deleting {}'.format(self.poutfile),\n 'debug')\n _os.remove(self.poutfile)\n if save:\n self._out = out\n self._got_out = True\n return out\n else:\n _logme.log('No file at {} even though job has completed!'\n .format(self.poutfile), 'critical')\n raise IOError('File not found: {}'.format(self.poutfile))", "def get_action_id(output):\n return output['Action queued with id']", "def get_koji_task_result(task_id, remote, ctx):\n py_cmd = ('import koji; '\n 'hub = koji.ClientSession(\"{kojihub_url}\"); '\n 'print(hub.getTaskResult({task_id}))')\n py_cmd = py_cmd.format(\n task_id=task_id,\n kojihub_url=config.kojihub_url\n )\n log.info(\"Querying kojihub for the result of task {0}\".format(task_id))\n task_result = _run_python_command(py_cmd, remote, ctx)\n return task_result", "def get_job_data(jid):\n return jrd.hgetall(_generate_job_key(jid))", "def get_job(\n self, job_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Job\":\n\n return communicator.Job(self.__requester).from_id(\n job_id=job_id, parameters=params\n )", "def get_user_jobs_route(id):\n user = models.User.query.filter_by(id=id).first()\n\n if user.get_id() is not None:\n _tasks = user.get_tasks_in_progress()\n running_tasks = get_running_task_dicts(_tasks)\n\n _tasks = user.get_finished_tasks()\n finished_tasks = get_finished_task_dicts(_tasks)\n\n response_object = {\n 'running_tasks': running_tasks,\n 'finished_tasks': finished_tasks\n }\n else:\n response_object = {'status': 'error'}\n print(jsonify(response_object))\n return jsonify(response_object)" ]
[ "0.748623", "0.73491913", "0.6980665", "0.6841408", "0.6526476", "0.64631826", "0.63375413", "0.6295551", "0.6289767", "0.62885624", "0.622371", "0.61111414", "0.60730827", "0.59569275", "0.59468514", "0.59456295", "0.5942398", "0.5934416", "0.59119844", "0.5900385", "0.58976066", "0.5890093", "0.58776546", "0.5872237", "0.584167", "0.58045524", "0.57993394", "0.57979333", "0.5793633", "0.57788354", "0.5775862", "0.5770597", "0.57632595", "0.5719357", "0.5709299", "0.5695526", "0.5687907", "0.56730753", "0.5672389", "0.56567246", "0.56481516", "0.5634011", "0.56329346", "0.5616511", "0.56143934", "0.56041443", "0.56030273", "0.5598884", "0.5568138", "0.55659807", "0.55642015", "0.55600274", "0.55491716", "0.553371", "0.5528156", "0.55176884", "0.5503123", "0.5497007", "0.54919994", "0.5476356", "0.5469865", "0.54698646", "0.546405", "0.54571444", "0.5448637", "0.54469985", "0.5437679", "0.5434867", "0.5422522", "0.54223615", "0.54185724", "0.5415284", "0.53982306", "0.5392626", "0.5390284", "0.53881925", "0.53565174", "0.5356101", "0.5354397", "0.5331692", "0.53253746", "0.53253454", "0.53155035", "0.52967364", "0.52957815", "0.5291503", "0.5289125", "0.5283962", "0.52830124", "0.52811486", "0.52809584", "0.52789056", "0.52784926", "0.5276907", "0.5274539", "0.52730936", "0.52693564", "0.5246661", "0.5242785", "0.5238552", "0.5236563" ]
0.0
-1
Parses the FJR produced by the job in order to retrieve the WrapperExitCode and ExeExitCode. Updates the BossDB with these values.
def parseFinalReport(self, input): from ProdCommon.FwkJobRep.ReportParser import readJobReport codeValue = {} jreports = readJobReport(input) if len(jreports) <= 0 : codeValue["applicationReturnCode"] = str(50115) codeValue["wrapperReturnCode"] = str(50115) common.logger.debug("Empty FWkobreport: error code assigned is 50115 ") return codeValue jobReport = jreports[0] exit_status = '' ##### temporary fix for FJR incomplete #### fjr = open (input) len_fjr = len(fjr.readlines()) if (len_fjr <= 6): ### 50115 - cmsRun did not produce a valid/readable job report at runtime codeValue["applicationReturnCode"] = str(50115) codeValue["wrapperReturnCode"] = str(50115) if len(jobReport.errors) != 0 : for error in jobReport.errors: if error['Type'] == 'WrapperExitCode': codeValue["wrapperReturnCode"] = error['ExitStatus'] elif error['Type'] == 'ExeExitCode': codeValue["applicationReturnCode"] = error['ExitStatus'] if error['Type'] == 'CMSException': codeValue["applicationReturnCodeOrig"] = error['ExitStatus'] else: continue if not codeValue.has_key('wrapperReturnCode'): codeValue["wrapperReturnCode"] = '' if not codeValue.has_key('applicationReturnCode'): if codeValue.has_key('applicationReturnCodeOrig'): codeValue["applicationReturnCode"] = \ codeValue["applicationReturnCodeOrig"] codeValue.pop("applicationReturnCodeOrig") else: codeValue["applicationReturnCode"] = '' else: if codeValue.has_key('applicationReturnCodeOrig'): codeValue.pop("applicationReturnCodeOrig") #### Filling BOSS DB with SE name and LFN, for edm and not_edm files #### lfns=[] pfns=[] if (len(jobReport.files) != 0): for f in jobReport.files: if f['LFN']: lfns.append(f['LFN']) if f['PFN']: #### FEDE to have the correct endpoit to use in the copyData (we modify the bossDB value and not the fjr ) if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308: pfns.append(os.path.dirname(f['SurlForGrid'])+'/') else: pfns.append(os.path.dirname(f['PFN'])+'/') ########## if (len(jobReport.analysisFiles) != 0): for aFile in jobReport.analysisFiles: if aFile['LFN']: lfns.append(aFile['LFN']) if aFile['PFN']: #### FEDE to have the correct endpoit to use in the copyData (we modify the bossDB value and not the fjr ) if common.scheduler.name().upper() not in ['LSF', 'CAF', 'PBS', 'PBSV2'] and codeValue["wrapperReturnCode"] == 60308: pfns.append(os.path.dirname(aFile['SurlForGrid'])+'/') else: pfns.append(os.path.dirname(aFile['PFN'])+'/') ######### codeValue["storage"] = pfns codeValue["lfn"] = lfns return codeValue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wsParseFJR(self):\n txt = '\\n#Written by cms_cmssw::wsParseFJR\\n'\n txt += 'echo \">>> Parse FrameworkJobReport crab_fjr.xml\"\\n'\n txt += 'if [ -s $RUNTIME_AREA/crab_fjr_$NJob.xml ]; then\\n'\n txt += ' if [ -s $RUNTIME_AREA/parseCrabFjr.py ]; then\\n'\n txt += ' cmd_out=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --dashboard $MonitorID,$MonitorJobID '+self.debugWrap+'`\\n'\n if self.debug_wrapper==1 :\n txt += ' echo \"Result of parsing the FrameworkJobReport crab_fjr.xml: $cmd_out\"\\n'\n txt += ' cmd_out_1=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --popularity $MonitorID,$MonitorJobID,$RUNTIME_AREA/inputsReport.txt '+self.debugWrap+'`\\n'\n# if self.debug_wrapper==1 :\n txt += ' echo \"Result of parsing the FrameworkJobReport crab_fjr.xml: $cmd_out_1\"\\n'\n txt += ' executable_exit_status=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --exitcode`\\n'\n txt += ' if [ $executable_exit_status -eq 50115 ];then\\n'\n txt += ' echo \">>> crab_fjr.xml contents: \"\\n'\n txt += ' cat $RUNTIME_AREA/crab_fjr_$NJob.xml\\n'\n txt += ' echo \"Wrong FrameworkJobReport --> does not contain useful info. ExitStatus: $executable_exit_status\"\\n'\n txt += ' elif [ $executable_exit_status -eq -999 ];then\\n'\n txt += ' echo \"ExitStatus from FrameworkJobReport not available. not available. Using exit code of executable from command line.\"\\n'\n txt += ' else\\n'\n txt += ' echo \"Extracted ExitStatus from FrameworkJobReport parsing output: $executable_exit_status\"\\n'\n txt += ' fi\\n'\n txt += ' else\\n'\n txt += ' echo \"CRAB python script to parse CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line.\"\\n'\n txt += ' fi\\n'\n #### Patch to check input data reading for CMSSW16x Hopefully we-ll remove it asap\n txt += ' if [ $executable_exit_status -eq 0 ];then\\n'\n txt += ' echo \">>> Executable succeded $executable_exit_status\"\\n'\n txt += ' fi\\n'\n txt += 'else\\n'\n txt += ' echo \"CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line.\"\\n'\n txt += 'fi\\n'\n txt += '\\n'\n txt += 'if [ $executable_exit_status -ne 0 ];then\\n'\n txt += ' echo \">>> Executable failed $executable_exit_status\"\\n'\n txt += ' echo \"ExeExitCode=$executable_exit_status\" | tee -a $RUNTIME_AREA/$repo\\n'\n txt += ' echo \"EXECUTABLE_EXIT_STATUS = $executable_exit_status\"\\n'\n txt += ' job_exit_code=$executable_exit_status\\n'\n txt += ' func_exit\\n'\n txt += 'fi\\n\\n'\n txt += 'echo \"ExeExitCode=$executable_exit_status\" | tee -a $RUNTIME_AREA/$repo\\n'\n txt += 'echo \"EXECUTABLE_EXIT_STATUS = $executable_exit_status\"\\n'\n txt += 'job_exit_code=$executable_exit_status\\n'\n\n return txt", "def new_result(self, job, update_model=True):\n\t\tif not job.exception is None:\n\t\t\tself.logger.warning(\"job {} failed with exception\\n{}\".format(job.id, job.exception))", "def process_jobresults(jobid, jobresult_zip, crashdb, cnx):\n if not cnx:\n print(\"No MySQL connection provided, won't export\")\n\n crashdb = jobresult_zip.extract(crashdb)\n\n def export_to_mysql(entry, mysql_cnx):\n \"\"\"\n Export a crash into mysql.\n\n :param entry: row from crashdb as dictionary\n :param mysql_cnx: connection to MySQL\n \"\"\"\n mysql_cursor = mysql_cnx.cursor()\n\n entry = defaultdict(lambda: None, entry)\n\n query = \"INSERT INTO crashes (job_id, type, is_security_issue, is_crash, sample_path, backtrace, 
backtrace_hash, return_code, create_time) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, NOW())\"\n try:\n mysql_cursor.execute(\n query,\n (\n jobid,\n entry[\"type\"],\n entry[\"is_security_issue\"],\n entry[\"is_crash\"],\n entry[\"sample\"],\n entry[\"backtrace\"],\n entry[\"hash\"],\n entry[\"return_code\"],\n ),\n )\n except mysql.connector.errors.IntegrityError as err:\n print(\"Integrity: {}\".format(err))\n\n mysql_cnx.commit()\n mysql_cursor.close()\n\n def dict_factory(cursor, row):\n \"\"\"\n Factory function for sqlite3 cursor.\n\n :return: each row as a dict having structure {\"colname\": value}\n \"\"\"\n rowdict = {}\n for idx, col in enumerate(cursor.description):\n rowdict[col[0]] = row[idx]\n return rowdict\n\n cdbcon = sqlite3.connect(crashdb)\n cdbcon.row_factory = dict_factory\n cdbcur = cdbcon.cursor()\n result = cdbcur.execute(\"SELECT * FROM analysis\")\n if not result:\n print(\"Crash database empty, nothing to export\")\n return\n\n result = [dict(row) for row in result.fetchall()]\n\n # insert into mysql\n for analysis in result:\n # pull backtrace, if any, into dictionary\n print(\"Exporting entry: {}\".format(analysis))\n\n analysis[\"hash\"] = hashlib.md5(bytes(analysis[\"backtrace\"], encoding=\"utf8\")).hexdigest()\n analysis[\"is_crash\"] = bool(analysis[\"is_crash\"])\n analysis[\"is_security_issue\"] = bool(analysis[\"is_security_issue\"])\n # unused\n analysis[\"should_ignore\"] = analysis[\"should_ignore\"]\n\n if cnx:\n export_to_mysql(analysis, cnx)\n\n cdbcon.close()", "def get_result(self):\n config = self.bisect_config\n results_confidence = 0\n if self.culprit:\n results_confidence = self.api.m.math_utils.confidence_score(\n self.lkgr.values, self.fkbr.values)\n\n if self.failed:\n status = 'failed'\n elif self.bisect_over:\n status = 'completed'\n else:\n status = 'started'\n\n aborted_reason = None\n if self.failed_initial_confidence:\n aborted_reason = _FAILED_INITIAL_CONFIDENCE_ABORT_REASON\n elif self.failed_direction:\n aborted_reason = _DIRECTION_OF_IMPROVEMENT_ABORT_REASON\n return {\n 'try_job_id': config.get('try_job_id'),\n 'bug_id': config.get('bug_id'),\n 'status': status,\n 'buildbot_log_url': self._get_build_url(),\n 'bisect_bot': self.get_perf_tester_name(),\n 'command': config['command'],\n 'test_type': config['test_type'],\n 'metric': config['metric'],\n 'change': self.relative_change,\n 'score': results_confidence,\n 'good_revision': self.good_rev.commit_hash,\n 'bad_revision': self.bad_rev.commit_hash,\n 'warnings': self.warnings,\n 'aborted_reason': aborted_reason,\n 'culprit_data': self._culprit_data(),\n 'revision_data': self._revision_data()\n }", "def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. 
This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... 
hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)", "def errors(job_name, jenkins_username, jenkins_token):\n global server\n# job_name = 'enterprise_pe-acceptance-tests_integration-system_pe_full-upgrade_weekend_2016.4.x' # 'enterprise_pe-orchestrator_intn-van-sys-pez-multi_2016.4.x-2016.4.x' # 'enterprise_pe-modules-vanagon-suite_intn-van-sys-pez-multi_daily-pe-modules-2016.4.x'\n server = Jenkins('https://cinext-jenkinsmaster-enterprise-prod-1.delivery.puppetlabs.net', username=jenkins_username, password=jenkins_token)\n info = server.get_job_info(job_name)\n builds = [server.get_build_info(job_name, build['number']) for build in info['builds']]\n failed_build_numbers = [b for b in builds if b['result'] == 'FAILURE']\n last_job_errors = None\n\n counts = defaultdict(int)\n similar = set()\n for build in failed_build_numbers:\n output = server.get_build_console_output(job_name, build['number'])\n finder = get_strategy(output)\n errors = finder(output)\n print \"Errors: {}\".format(errors)\n if last_job_errors:\n seq = difflib.SequenceMatcher(a=last_job_errors, b=errors)\n if seq.ratio() == 1.0:\n counts['exact'] += 1\n if seq.ratio() >= 0.7 and seq.ratio() < 1.0:\n counts['similar'] += 1\n similar.append(errors)\n else:\n last_job_errors = errors\n\n if last_job_errors:\n click.echo('Last job errors were:')\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'exact' in counts:\n click.echo('There were {} jobs that failed with errors exactly the same as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'similar' in counts:\n click.echo('There were {} jobs that failed with experienced similar errors as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n for s in similar:\n click.echo('Additional Failed Job:')\n click.echo('\\t{}'.format('\\n\\t'.join(s)))", "def eval_result(self, jdata):\n with current_app.app_context():\n out_msg = helpers.file_result(jdata)\n jdata['status_f'] = \"Complete\"\n if jdata['status'] == 2 or jdata['status'] == 3:\n jdata['status_f'] = \"Error\"\n db.session.query(File).filter(File.sha1 == jdata[\"sha1\"]).update({\n File.status_f: jdata['status_f'],\n File.score: jdata['score'],\n File.exec_time: jdata['exec_time'],\n #File.date_b: jdata['server_time'],\n #File.date_b: dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3].datetime(),\n File.date_b: dtime.now(),\n File.message: out_msg,\n File.results: json.dumps(jdata)\n #File.results: jdata\n #File.results: {}\n })\n db.session.commit()\n return self.update_state(state='SUCCESS')", "def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')", "def t_status_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In status process...\")\n\n d_state = self.job_state(*args, **kwargs)\n\n d_ret = d_state['d_ret']\n b_status = 
d_state['status']\n\n l_keys = d_ret.items()\n l_status = []\n for i in range(0, int(len(l_keys)/2)):\n b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]\n try:\n endcode = d_ret['%s.end' % str(i)]['returncode'][0]\n except:\n endcode = None\n\n if endcode == None and b_startEvent:\n l_status.append('started')\n if not endcode and b_startEvent and type(endcode) is int:\n l_status.append('finishedSuccessfully')\n if endcode and b_startEvent:\n l_status.append('finishedWithError')\n\n self.dp.qprint('b_startEvent = %d' % b_startEvent)\n self.dp.qprint(endcode)\n self.dp.qprint('l_status = %s' % l_status)\n\n d_ret['l_status'] = l_status\n return {\"d_ret\": d_ret,\n \"status\": b_status}", "def return_results(self):\n\n caching_info = f'INFO: cache_source of BS calc node: {self.ctx.BS_run.get_cache_source}'\n self.report(caching_info)\n\n if not self.ctx.BS_run.is_finished_ok:\n self.ctx.successful = False\n error = f'ERROR BS calculation failed somehow it is in state {self.ctx.BS_run.process_state}'\n self.report(error)\n self.ctx.errors.append(error)\n return self.exit_codes.ERROR_BS_CALC_FAILED # pylint: disable=no-member\n\n # create dict to store results of workflow output\n outputnode_dict = {}\n outputnode_dict['workflow_name'] = self.__class__.__name__\n outputnode_dict['workflow_version'] = self._wf_version\n outputnode_dict['withmpi'] = self.ctx.withmpi\n outputnode_dict['resources'] = self.ctx.resources\n outputnode_dict['max_wallclock_seconds'] = self.ctx.max_wallclock_seconds\n outputnode_dict['queue_name'] = self.ctx.queue\n outputnode_dict['custom_scheduler_commands'] = self.ctx.custom_scheduler_commands\n outputnode_dict['BS_params'] = self.ctx.BS_params_dict\n if 'kpoints' not in self.inputs:\n outputnode_dict['structure_type'] = self.ctx.structure_data\n outputnode_dict['BS_wf_description'] = self.ctx.description_wf\n outputnode_dict['BS_wf_label'] = self.ctx.label_wf\n try:\n outputnode_dict['nspin'] = self.ctx.BS_run.res.nspin\n except:\n error = 'ERROR: nspin not extracted'\n self.report(error)\n self.ctx.successful = False\n self.ctx.errors.append(error)\n outputnode_dict['successful'] = self.ctx.successful\n outputnode_dict['list_of_errors'] = self.ctx.errors\n\n # create output node with data-provenance\n outputnode = Dict(outputnode_dict)\n outputnode.label = 'kkr_BS_wc_results'\n outputnode.description = 'Contains the info of the WC'\n\n self.report('INFO: create Banstructure results nodes')\n try:\n self.report(\n f'INFO: create Bandstructure results nodes. 
BS calc retrieved node={self.ctx.BS_run.outputs.retrieved}'\n )\n has_BS_run = True\n except AttributeError as e:\n self.report('ERROR: No Bandstructure calc retrieved node found')\n self.report(f'Caught AttributeError {e}')\n return self.exit_codes.ERROR_BS_CALC_FAILED # pylint: disable=no-member\n\n if has_BS_run:\n BS_retrieved = self.ctx.BS_run.outputs.retrieved\n\n ef = self.ctx.fermi_energy # in Ry unit\n kpoints = self.ctx.BS_kpoints\n\n # Here outdict dictionary has been created to set the Dict result_wf, BS_data\n # to the output(spec.output) of the wf\n outdict = {}\n if has_BS_run:\n ArraData = parse_BS_data(BS_retrieved, Float(ef), kpoints)\n outdict['BS_Data'] = ArraData['BS_Data']\n\n # link to the BS output nodes\n link_nodes = outdict.copy()\n\n outdict['results_wf'] = create_out_dict_node(outputnode, **link_nodes)\n\n # create links to output nodes\n for link_name, node in outdict.items():\n self.out(link_name, node)\n\n self.report('INFO: done with BS_workflow!\\n')", "def get_ret_code(self):\n\t\treturn call_sdk_function('PrlJob_GetRetCode', self.handle)", "def jobFailed(self, jobName):\n\n # ignore non merge jobs\n if jobName.find('mergejob') == -1:\n logging.info(\"Ignoring job %s, since it is not a merge job\" \\\n % jobName)\n # Add cleanup flag for non merge jobs too\n logging.info(\"trigger cleanup for: %s\" % jobName)\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing failure event\")\n return\n\n # files can be cleaned up now\n logging.info(\"trigger cleanup for: %s\" % jobName)\n\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing failure event\")\n\n # verify enable condition\n if not self.enabled:\n return\n\n # open a DB connection\n database = MergeSensorDB()\n\n # start a transaction\n database.startTransaction()\n\n # get job information\n try:\n jobInfo = database.getJobInfo(jobName)\n\n # cannot get it!\n except Exception, msg:\n logging.error(\"Cannot process Failure event for job %s: %s\" \\\n % (jobName, msg))\n database.closeDatabaseConnection()\n return\n\n # check that job exists\n if jobInfo is None:\n logging.error(\"Job %s does not exist.\" % jobName)\n database.closeDatabaseConnection()\n return\n\n # check status\n if jobInfo['status'] != 'undermerge':\n logging.error(\"Cannot process Failure event for job %s: %s\" \\\n % (jobName, \"the job is not currently running\"))\n database.closeDatabaseConnection()\n\n return\n\n # get dataset id\n datasetId = database.getDatasetId(jobInfo['datasetName'])\n\n # mark all input files as 'unmerged' (or 'invalid')\n unFinishedFiles = []\n for fileName in jobInfo['inputFiles']:\n\n # update status\n newStatus = database.updateInputFile(\\\n datasetId, fileName, \\\n status = \"unmerged\", \\\n maxAttempts = int(self.args['MaxInputAccessFailures']))\n\n # add invalid files to list of non finished files\n if newStatus == 'invalid':\n unFinishedFiles.append(fileName)\n\n # mark output file as 'failed'\n database.updateOutputFile(datasetId, jobName=jobName, status='failed')\n\n # commit changes\n database.commit()\n\n # notify the PM about the unrecoverable files\n if len(unFinishedFiles) > 0:\n File.merged(unFinishedFiles, True)\n\n # log message\n logging.info(\"Job %s failed, file information updated.\" % jobName)\n\n # close connection\n database.closeDatabaseConnection()", "def 
wsModifyReport(self, nj):\n\n txt = ''\n if (self.copy_data == 1):\n txt = '\\n#Written by cms_cmssw::wsModifyReport\\n'\n\n txt += 'echo \">>> Modify Job Report:\" \\n'\n txt += 'chmod a+x $RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py\\n'\n txt += 'echo \"CMSSW_VERSION = $CMSSW_VERSION\"\\n\\n'\n\n args = 'fjr $RUNTIME_AREA/crab_fjr_$NJob.xml json $RUNTIME_AREA/resultCopyFile n_job $OutUniqueID PrimaryDataset $PrimaryDataset ApplicationFamily $ApplicationFamily ApplicationName $executable cmssw_version $CMSSW_VERSION psethash $PSETHASH'\n\n if (self.publish_data == 1):\n txt += 'ProcessedDataset='+self.processedDataset+'\\n'\n txt += 'echo \"ProcessedDataset = $ProcessedDataset\"\\n'\n args += ' UserProcessedDataset $USER-$ProcessedDataset-$PSETHASH'\n\n txt += 'echo \"$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py '+str(args)+'\"\\n'\n txt += '$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py '+str(args)+'\\n'\n txt += 'modifyReport_result=$?\\n'\n txt += 'if [ $modifyReport_result -ne 0 ]; then\\n'\n txt += ' modifyReport_result=70500\\n'\n txt += ' job_exit_code=$modifyReport_result\\n'\n txt += ' echo \"ModifyReportResult=$modifyReport_result\" | tee -a $RUNTIME_AREA/$repo\\n'\n txt += ' echo \"WARNING: Problem with ModifyJobReport\"\\n'\n txt += 'else\\n'\n txt += ' mv NewFrameworkJobReport.xml $RUNTIME_AREA/crab_fjr_$NJob.xml\\n'\n txt += 'fi\\n'\n return txt", "def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} 
{}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))", "def find_job_and_job_status(self):\n\n def find_job_and_job_status_log_history(f):\n rcelog('critical', \"find_job_and_status(): Found job {0} in history. Terminated in error.\".\n format(self.id))\n return f\n\n try:\n return self.__get_job_status_from_queue__()\n except:\n pass\n\n try:\n return find_job_and_job_status_log_history(self.__get_job_status_from_history__())\n except:\n return (None, None)", "def updateRcloneJobStatus():\n global jobIds, jobStatusGauge\n\n # Check if the jobs are running, update the variables\n for jobName, jobId in jobIds.items():\n jobIsRunning = getRcloneJobRunning(jobId)\n jobIds[jobName] = jobId if jobIsRunning else None\n jobStatusGauge.labels(rclone_job=jobName).set(1 if jobIsRunning else 0)", "def jobSuccess(self, jobReport):\n\n\n\n jobName = None\n try:\n\n #// Invoke job report handler with jobReport location and flag to enable/disable merge job report handling\n\n handler = ReportHandler(jobReport, int(self.args['MaxInputAccessFailures']), enableMergeHandling=self.enabled)\n jobName = handler()\n logging.info('this is jobname'+ str(jobName))\n except Exception, ex:\n msg = \"Failed to handle job report from job:\\n\"\n msg += \"%s\\n\" % jobReport\n msg += str(ex)\n msg += \"\\n\"\n msg += traceback.format_exc()\n logging.error(msg)\n\n #// Failed to read job report\n if jobName is None:\n return\n\n # files can be cleaned up now\n logging.info(\"trigger cleanup for: %s\" % jobName)\n\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing success event\")\n\n\n\n\n return #// END jobSuccess", "def _get_module_return_code(self, status, module):\n\n # initialize return code array\n arr = []\n check_failed = False\n\n if module not in status.data:\n # assume running\n arr = [1]\n else:\n for job_name in status.data[module].keys():\n if job_name != 'pipeline_index':\n\n # update the job status and get the status string\n status._update_job_status(module, job_name)\n js = status.data[module][job_name]['job_status']\n\n if js == 'successful':\n arr.append(0)\n elif js == 'failed':\n arr.append(2)\n check_failed = True\n elif js is None:\n arr.append(3)\n else:\n arr.append(1)\n\n status._dump()\n\n return_code = self._parse_code_array(arr)\n\n status = self.RETURN_CODES[return_code]\n fail_str = ''\n if check_failed and status != 'failed':\n fail_str = ', but some jobs have failed'\n logger.info('Module \"{}\" for job \"{}\" is {}{}.'\n .format(module, self._config.name, status, fail_str))\n\n return return_code", "def runjob(job):\n inputfiles = glob.glob(job['InputFile'])\n logecho(' %s file/s found for %s: ' %\n (len(inputfiles), job['InputFile']))\n\n # process files, order by most recent\n 
inputfiles.sort(key=os.path.getmtime, reverse=True)\n for inputfile in inputfiles:\n inputfile_error = False\n inputfile_errordetails = ''\n t1_startdt = datetime.now()\n starttime = t1_startdt.strftime('%Y-%m-%d %H:%M:%S')\n t1_start = perf_counter()\n dupecount = 0\n dupesremoved = 0\n resource_id = ''\n\n logecho(' Processing: %s...' % inputfile)\n\n def custom_date_parser(x): return dateparser.parse(\n x, date_formats=dateformats_list)\n\n df = pd.read_csv(inputfile, parse_dates=[\n datecolumn], date_parser=custom_date_parser,\n skipinitialspace=True)\n\n if job['Dedupe']:\n pkey_list = list(job['PrimaryKey'].split(','))\n\n # first, count number of dupe rows for logging\n dupecount = df.duplicated(subset=pkey_list, keep='first').sum()\n\n dedupe_flag = job['Dedupe']\n if dedupe_flag == 'first' or dedupe_flag == 'last':\n df.drop_duplicates(\n subset=pkey_list, keep=dedupe_flag, inplace=True)\n dupesremoved = dupecount\n\n colname_list = df.columns.tolist()\n\n coltype_list = []\n for column in df:\n coltype_list.append(get_col_dtype(df[column]))\n\n fields_dictlist = []\n for i in range(0, len(colname_list)):\n fields_dictlist.append({\n \"id\": colname_list[i],\n \"type\": coltype_list[i][0]\n })\n if coltype_list[i][0] == 'timestamp':\n df[colname_list[i]] = df[colname_list[i]].astype(str)\n\n logecho('FIELDS_DICTLIST: %s' % fields_dictlist, level='debug')\n\n data_dict = df.to_dict(orient='records')\n\n # check if resource exists\n # this works only when TargetResource is an existing\n # resource id hash\n try:\n resource = portal.action.resource_show(\n id=job['TargetResource'])\n except:\n logecho(' Resource \"%s\" is not a resource id.' %\n job['TargetResource'])\n resource = ''\n else:\n existing_resource_desc = resource['description']\n\n if not resource:\n # resource doesn't exist. Check if package exists\n try:\n package = portal.action.package_show(\n id=job['TargetPackage'])\n except:\n package = ''\n\n if not package:\n # package doesn't exist. Create it\n # first, check if TargetOrg exist\n logecho(' Creating package \"%s\"...' %\n job['TargetPackage'])\n\n if not (job['TargetOrg'] in org_list):\n errmsg = 'TargetOrg \"%s\" does not exist!' % job['TargetOrg']\n logecho(errmsg, level='error')\n sys.exit(errmsg)\n\n try:\n package = portal.action.package_create(\n name=job['TargetPackage'],\n private=False,\n owner_org=job['TargetOrg']\n )\n except Exception as e:\n logecho(' Cannot create package \"%s\"!' %\n job['TargetPackage'], level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n package = ''\n else:\n logecho(' Created package \"%s\"...' %\n job['TargetPackage'])\n else:\n logecho(' Package \"%s\" found...' 
% job['TargetPackage'])\n\n logecho('PACKAGE: %s\\n\\nFIELDS: %s' %\n (package, fields_dictlist), level='debug')\n # logecho('RECORDS: %s\\n' % data_dict, level='debug')\n\n # now check if resource name already exists in package\n resource_exists = False\n existing_resource_desc = ''\n resources = package.get('resources')\n for resource in resources:\n if resource['name'] == job['TargetResource']:\n resource_exists = True\n existing_resource_desc = resource['description']\n resource_id = resource['id']\n break\n\n #resource_id = ''\n if package and resource_exists:\n\n if job['Truncate']:\n try:\n result = portal.action.datastore_delete(\n resource_id=resource['id'],\n force=True\n )\n except Exception as e:\n logecho(' Truncate failed',\n level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n\n logecho(' \"%s\" (%s) exists in package \"%s\". Doing datastore_upsert...' % (\n job['TargetResource'], resource['id'], job['TargetPackage']))\n try:\n result = portal.action.datastore_upsert(\n force=True,\n resource_id=resource['id'],\n records=data_dict,\n method='upsert',\n calculate_record_count=True\n )\n except Exception as e:\n logecho(' Upsert failed', level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n else:\n logecho(' Upsert successful! %s rows...' %\n len(data_dict))\n #resource_id = result['resource_id']\n #resource_id = resource['id']\n else:\n logecho(' \"%s\" does not exist in package \"%s\". Doing datastore_create...' % (\n job['TargetResource'], job['TargetPackage']))\n\n alias = '%s-%s-%s' % (job['TargetOrg'],\n job['TargetPackage'], job['TargetResource'])\n resource = {\n \"package_id\": package['id'],\n \"format\": \"csv\",\n \"name\": job['TargetResource']\n }\n try:\n resource = portal.action.datastore_create(\n force=True,\n resource=resource,\n aliases='',\n fields=fields_dictlist,\n records=data_dict,\n primary_key=job['PrimaryKey'],\n indexes=job['PrimaryKey'],\n calculate_record_count=False\n )\n except Exception as e:\n logecho(' Cannot create resource \"%s\"!' %\n job['TargetResource'], level='error')\n inputfile_error = True\n inputfile_errordetails = str(e)\n else:\n logecho(' Created resource \"%s\"...' 
%\n job['TargetResource'])\n resource_id = resource['resource_id']\n resource = portal.action.datastore_create(\n force=True,\n resource_id=resource_id,\n aliases=alias,\n calculate_record_count=True\n )\n\n logecho('EXISTING DESC for resource %s: %s' %\n (resource_id, existing_resource_desc), level='debug')\n updated_desc = ''\n if existing_resource_desc:\n result = re.split(r' \\(UPDATED: (.*?)\\)$',\n existing_resource_desc)\n if len(result) == 3:\n # there is an old update date\n updated_desc = result[0]\n else:\n updated_desc = existing_resource_desc\n updated_desc = updated_desc + ' (UPDATED: %s)' % starttime\n logecho('RESOURCE UPDATED DESC: %s: %s' %\n (resource_id, updated_desc), level='debug')\n portal.action.resource_update(\n id=resource_id,\n description=updated_desc)\n\n logecho('RESOURCE: %s' % resource, level='debug')\n\n if job['Stats'] and resource_id:\n logecho(' Computing stats...')\n result = computestats(\n job['Stats'], job['PrimaryKey'], package, resource_id, job['TargetResource'],\n starttime)\n\n t1_stop = perf_counter()\n t1_stopdt = datetime.now()\n endtime = t1_stopdt.strftime('%Y-%m-%d %H:%M:%S')\n elapsed = t1_stop - t1_start\n\n if inputfile_error:\n # inputfile processing failed, move to problemsdir\n try:\n shutil.move(inputfile, problemsdir + '/' +\n os.path.basename(inputfile))\n except Exception as e:\n errmsg = 'Cannot move %s to %s: %s' % (\n inputfile, problemsdir, str(e))\n logecho(errmsg, level='error')\n problems_logger.error(errmsg)\n\n error_details = '- FILE: %s START: %s END: %s ELAPSED: %s DUPES: %s/%s ERRMSG: %s' % (\n inputfile, starttime, endtime, elapsed, dupecount, dupesremoved, inputfile_errordetails)\n problems_logger.info(error_details)\n else:\n # inputfile was successfully processed, move to processeddir\n try:\n shutil.move(inputfile, processeddir + '/' +\n os.path.basename(inputfile))\n except Exception as e:\n errmsg = 'Cannot move %s to %s: %s' % (\n inputfile, processeddir, str(e))\n logecho(errmsg, level='error')\n processed_logger.error(errmsg)\n\n processed = len(df.index) if 'df' in locals() else 0\n processed_details = '- FILE: %s START: %s END: %s ELAPSED: %s DUPES: %s/%s PROCESSED: %s' % (\n inputfile, starttime, endtime, elapsed, dupecount, dupesremoved, processed)\n processed_logger.info(processed_details)\n\n logecho(' Processed %s file/s...' 
% len(inputfiles))", "def get_exit_code(self):", "def RetrievePipelineToolStatus( raiseOnExitCode=False ):\n global submissionInfo\n\n scenePath = NodegraphAPI.GetSourceFile()\n\n jobWriterPath = os.path.join(submissionInfo[\"RepoDirs\"][\"submission/Integration/Main\"], \"JobWriter.py\")\n argArray = [\"-ExecuteScript\", jobWriterPath, \"Katana\", \"--status\", \"--scene-path\", scenePath]\n statusMessage = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=raiseOnExitCode)\n return statusMessage", "def test_successful_job(self, _is_coalesced):\n successful_job = json.loads(BASE_JSON % (SUCCESS, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def test_failed_job(self):\n failed_job = json.loads(BASE_JSON % (FAILURE, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def finish(self):\n for msg, info in self.errors.iteritems():\n hosts = [ self.job_to_str_func(job) for job in info['jobs'] ]\n\n max_jobs_num = self.max_jobs_num\n if max_jobs_num < 0 or max_jobs_num > len(hosts):\n hosts_msg = ': %s' % ' '.join(hosts)\n elif max_jobs_num == 0:\n hosts_msg = ''\n else:\n hosts_msg = ': %s (and %s more)' % (' '.join(sorted(hosts)[:self.max_jobs_num]), \\\n len(hosts) - self.max_jobs_num)\n\n ex = info['exception']\n msg = '%s.%s: %s' % (ex.__class__.__module__, \\\n ex.__class__.__name__, \\\n str(ex).split('\\n')[0])\n print >> self.outfile, \"Exception '%s' in %s jobs%s.\" % (msg, len(hosts), hosts_msg)\n print >> self.outfile, exception_description(ex).strip()\n if info['trace'] != None:\n print >> self.outfile, 'Traceback:'\n print >> self.outfile, ''.join(info['trace'])\n\n print >> self.outfile", "def get_exitcode(self, update=True):\n _logme.log('Getting exitcode', 'debug')\n if self.done and self._got_exitcode:\n _logme.log('Getting exitcode from _exitcode', 'debug')\n return self._exitcode\n if update and not self._updating and not self.done:\n self.update()\n if not self.done:\n _logme.log('Job is not complete, no exit code yet', 'info')\n return None\n _logme.log('Getting exitcode from queue', 'debug')\n if not self.queue_info:\n self.queue_info = self.queue[self.id]\n if hasattr(self.queue_info, 'exitcode'):\n code = self.queue_info.exitcode\n else:\n code = None\n _logme.log('No exitcode even though the job is done, this ' +\n \"shouldn't happen.\", 'warn')\n self._exitcode = code\n self._got_exitcode = True\n return code", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n 
self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def CheckIfJobFinished(jobid, numseq, to_email, g_params): # {{{\n bsname = \"job_final_process\"\n path_result = os.path.join(g_params['path_static'], 'result')\n rstdir = os.path.join(path_result, jobid)\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n g_params['jobid'] = jobid\n g_params['numseq'] = numseq\n g_params['to_email'] = to_email\n jsonfile = os.path.join(rstdir, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n finished_idx_list = []\n failed_idx_list = []\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_result'], g_params['jobid'],\n lockname)\n\n num_processed = len(finished_idx_list)+len(failed_idx_list)\n if num_processed >= numseq: # finished\n if ('THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH' in g_params\n and numseq <= g_params['THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH']):\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile]\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{rstdir}/{bsname},{name_server},{jobid}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n cmdline = f\"python {py_scriptfile} -i {jsonfile}\"\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(rstdir)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(\"Run cmdline: %s\"%(cmdline), gen_logfile)\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"isSubmitSuccess: %s\"%(str(isSubmitSuccess)), gen_logfile)", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n 
exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def check_result(self, params, server):\n if server['building']:\n # I assume Server and client are on the same TimeZone\n # the API doesn't tell me where is the server (only /systemInfo)\n job_started = datetime.fromtimestamp(int(server['timestamp']) / 1000)\n time_delta = (params['now'] - job_started)\n\n # New in version 2.7 --> datetime.timedelta.total_seconds\n # we want python >= 2.4 so we will do it ourselves\n seconds_since_start = time_delta.seconds + time_delta.days * 86400\n job_duration = self.seconds2human(seconds_since_start)\n if (seconds_since_start >= params['critical'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'CRITICAL'\n elif (seconds_since_start >= params['warning'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'WARNING'\n else:\n msg = '%s still running after %s, watch it on %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'OK'\n else:\n # Easy part, the job has completed ...\n if server['result'] == 'SUCCESS':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s exited normally after %s' % (params['job'], duration)\n status = 'OK'\n\n elif server['result'] == 'UNSTABLE':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s is marked as unstable after %s, see %sconsole#footer' % (\n params['job'], duration, server['url'])\n status = 'WARNING'\n\n elif server['result'] == 'FAILURE':\n msg = '%s exited with an error, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'CRITICAL'\n\n elif server['result'] == 'ABORTED':\n msg = '%s has been aborted, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n else:\n # If you get there, patch welcome\n msg = '%s is in a not known state, Jenkins API issue ? 
see %s' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n\n return(status, msg)", "def __parse_result(self, runid, html, need_extra_info=True) :\n ret = {}\n ret['origin_runid'] = runid\n\n # get first row\n status_m = re.match(\".+?(<tr class=\\\"rowOdd\\\">.*?</tr>)\", html, re.DOTALL)\n if not status_m :\n logging.error(\"Failed to get status row.\")\n return None\n status = status_m.group(1)\n\n # get result\n result_m = re.match(r'.+?<td class=\"runId\">(\\d+)</td>.*?<td class=\"runJudgeStatus\".*?<span.*?>(.*?)</span>.*?<td class=\"runTime\".*?>(.*?)</td>.*<td class=\"runMemory\".*?>(.*?)</td>', status, re.DOTALL)\n if not result_m :\n wf(\"parse_result_status\", status)\n logging.error(\"Failed to get current result.\")\n return None\n ret['origin_runid'] = result_m.group(1).strip()\n\n if None != runid and runid != ret['origin_runid'] :\n return None\n\n result = result_m.group(2).strip()\n cvtRes = Judge.ConvertResult(result)\n ret['result_id'] = cvtRes[0]\n ret['result'] = cvtRes[1]\n\n ret['time'] = str(int(result_m.group(3).strip()))\n ret['memory'] = str(int(result_m.group(4).strip()))\n\n ret['_is_end'] = Judge.IsFinalResult(result)\n\n if need_extra_info and oj.Judge_CE == ret['result_id'] :\n ce_m = re.match(r'.+?showJudgeComment\\.do\\?submissionId=([0-9]*)', status, re.DOTALL) \n if ce_m :\n ce_id = ce_m.group(1).strip()\n ret['ce_id'] = ce_id\n ret['extra_info'] = self.__extra_info(ce_id)\n else :\n ret['extra_info'] = \"No CE ID\"\n\n return ret", "def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n 
logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = 
wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = 
attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. 
Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. 
Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + 
\"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n 
log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
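# Sketch (placeholder path, not from the original script): step 19 above deletes the
# error workbook when it holds only the header row (nrows == 1), i.e. when no
# unmatched records were written. The check in isolation:
import os
import xlrd

def drop_empty_error_log(xls_path):
    book = xlrd.open_workbook(xls_path)
    if book.sheet_by_index(0).nrows == 1:  # header only, no error rows logged
        os.remove(xls_path)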
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
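# Sketch (illustrative, not from the original script): step 21 above converts
# PROJ_DURATION_MTHS strings such as "6 Months" or "2 Years" into a month count.
def duration_in_months(duration_str):
    value = int(duration_str.split(" ")[0])
    if "Year" in duration_str:
        return value * 12
    if "Month" in duration_str:
        return value
    return None  # unrecognised unit; the original simply leaves the field unset

assert duration_in_months("6 Months") == 6
assert duration_in_months("2 Years") == 24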
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
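# Sketch (placeholder fields and path): step 25 above writes the cleaned report with
# xlwt: one header row, then one row per matched record, saving the workbook at the
# end. The bare pattern:
import xlwt

def export_rows(records, fields, out_path):
    wbk = xlwt.Workbook()
    sheet = wbk.add_sheet("Book 1")
    for col, name in enumerate(fields):
        sheet.write(0, col, name)       # header row
    for r, record in enumerate(records, 1):
        for col, value in enumerate(record):
            sheet.write(r, col, value)  # one cell per field
    wbk.save(out_path)

export_rows([("P001", "Sample project")], ["Project Ref No", "Project Title"], "cleaned_report.xls")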
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
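# Sketch: step 29 above relies on an add_months() helper that is not shown in this
# excerpt. A common implementation is given below; the original may clamp
# day-of-month overflow differently.
import calendar
import datetime

def add_months(sourcedate, months):
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return datetime.datetime(year, month, day)

print(add_months(datetime.datetime(2020, 1, 31), 1))  # 2020-02-29 00:00:00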
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. 
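# Sketch (illustrative, not from the original script): step 34 above decides whether a
# project is still on-going by string-splitting the timedelta between PROJ_END_DATE
# and today. The same test expressed directly on the timedelta:
import datetime

def is_ongoing(proj_end_date):
    days_left = (proj_end_date.date() - datetime.date.today()).days
    return days_left >= 0  # 0 means the project ends today; negative rows are dropped

print(is_ongoing(datetime.datetime.now() + datetime.timedelta(days=30)))  # True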
Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()", "def GetResult(jobid, g_params): # {{{\n # retrieving result from the remote server for this job\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n\n webcom.loginfo(f\"GetResult for {jobid}.\\n\", gen_logfile)\n\n path_static = g_params['path_static']\n path_result = os.path.join(path_static, 'result')\n path_cache = g_params['path_cache']\n finished_date_db = g_params['finished_date_db']\n name_server = g_params['name_server']\n\n rstdir = os.path.join(path_result, jobid)\n runjob_logfile = os.path.join(rstdir, \"runjob.log\")\n runjob_errfile = os.path.join(rstdir, \"runjob.err\")\n outpath_result = os.path.join(rstdir, jobid)\n if not os.path.exists(outpath_result):\n os.mkdir(outpath_result)\n\n remotequeue_idx_file = os.path.join(rstdir, \"remotequeue_seqindex.txt\")\n\n torun_idx_file = os.path.join(rstdir, \"torun_seqindex.txt\")\n finished_idx_file = os.path.join(rstdir, \"finished_seqindex.txt\")\n query_parafile = os.path.join(rstdir, \"query.para.txt\")\n\n query_para = {}\n if os.path.exists(query_parafile):\n content = myfunc.ReadFile(query_parafile)\n if content != \"\":\n try:\n query_para = json.loads(content)\n except ValueError:\n query_para = {}\n failed_idx_file = os.path.join(rstdir, \"failed_seqindex.txt\")\n\n starttagfile = os.path.join(rstdir, \"runjob.start\")\n cnttry_idx_file = os.path.join(rstdir, \"cntsubmittry_seqindex.txt\") # index file to keep log of tries\n tmpdir = os.path.join(rstdir, \"tmpdir\")\n finished_seq_file = os.path.join(outpath_result, \"finished_seqs.txt\")\n\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n\n finished_info_list = [] # [info for finished record]\n finished_idx_list = [] # [origIndex]\n failed_idx_list = [] # [origIndex]\n resubmit_idx_list = [] # [origIndex]\n keep_queueline_list = [] # [line] still in queue\n\n cntTryDict = {}\n if os.path.exists(cnttry_idx_file):\n with open(cnttry_idx_file, 'r') as fpin:\n try:\n cntTryDict = json.load(fpin)\n except Exception:\n cntTryDict = {}\n\n # in case of missing queries, if remotequeue_idx_file is empty but the job\n # is still not finished, force recreating torun_idx_file\n if 'DEBUG' in g_params and g_params['DEBUG']:\n try:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file=%s, size(remotequeue_idx_file)=%d, content=\\\"%s\\\"\\n\" %(jobid, remotequeue_idx_file, os.path.getsize(remotequeue_idx_file), myfunc.ReadFile(remotequeue_idx_file)), gen_logfile)\n except Exception:\n pass\n if ((not os.path.exists(remotequeue_idx_file) or # {{{\n os.path.getsize(remotequeue_idx_file) < 1)):\n idlist1 = []\n idlist2 = []\n if os.path.exists(finished_idx_file):\n idlist1 = 
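# Sketch (simplified; the original copies then deletes when renaming): step 36 above
# moves each processed report into the backup folder, prefixing a timestamp when a
# file of the same name is already there.
import datetime
import os
import shutil

def backup_report(report_path, backup_folder):
    name = os.path.basename(report_path)
    target = os.path.join(backup_folder, name)
    if os.path.exists(target):
        name = datetime.datetime.now().strftime("%Y%m%d-%H%M") + name
        target = os.path.join(backup_folder, name)
    shutil.move(report_path, target)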
myfunc.ReadIDList(finished_idx_file)\n if os.path.exists(failed_idx_file):\n idlist2 = myfunc.ReadIDList(failed_idx_file)\n\n completed_idx_set = set(idlist1 + idlist2)\n\n jobinfofile = os.path.join(rstdir, \"jobinfo\")\n jobinfo = myfunc.ReadFile(jobinfofile).strip()\n jobinfolist = jobinfo.split(\"\\t\")\n if len(jobinfolist) >= 8:\n numseq = int(jobinfolist[3])\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(completed_idx_set)=%d+%d=%d, numseq=%d\\n\"%(len(idlist1), len(idlist2), len(completed_idx_set), numseq), gen_logfile)\n\n if len(completed_idx_set) < numseq:\n all_idx_list = [str(x) for x in range(numseq)]\n torun_idx_str_list = list(set(all_idx_list)-completed_idx_set)\n for idx in torun_idx_str_list:\n try:\n cntTryDict[int(idx)] += 1\n except (ValueError, IndexError, KeyError):\n cntTryDict[int(idx)] = 1\n myfunc.WriteFile(\"\\n\".join(torun_idx_str_list)+\"\\n\", torun_idx_file, \"w\", True)\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"recreate torun_idx_file: jobid = %s, numseq=%d, len(completed_idx_set)=%d, len(torun_idx_str_list)=%d\\n\"%(jobid, numseq, len(completed_idx_set), len(torun_idx_str_list)), gen_logfile)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n else:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file %s is not empty\\n\" %(jobid, remotequeue_idx_file), gen_logfile)\n# }}}\n\n text = \"\"\n if os.path.exists(remotequeue_idx_file):\n text = myfunc.ReadFile(remotequeue_idx_file)\n if text == \"\":\n return 1\n lines = text.split(\"\\n\")\n\n nodeSet = set([])\n for i in range(len(lines)):\n line = lines[i]\n if not line or line[0] == \"#\":\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n continue\n node = strs[1]\n nodeSet.add(node)\n\n myclientDict = {}\n for node in nodeSet:\n wsdl_url = f\"http://{node}/pred/api_submitseq/?wsdl\"\n try:\n myclient = Client(wsdl_url, cache=None, timeout=30)\n myclientDict[node] = myclient\n except Exception as e:\n webcom.loginfo(f\"Failed to access {wsdl_url} with errmsg {e}\", gen_logfile)\n pass\n\n for i in range(len(lines)): # {{{\n line = lines[i]\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n myfunc.WriteFile(f\"Process {line}\\n\", gen_logfile, \"a\", True)\n if not line or line[0] == \"#\":\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: line empty or line[0] = '#', ignore\", gen_logfile)\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(strs)=%d (!=6), ignore\\n\"%(len(strs)), gen_logfile)\n continue\n origIndex = int(strs[0])\n node = strs[1]\n remote_jobid = strs[2]\n description = strs[3]\n seq = strs[4]\n submit_time_epoch = float(strs[5])\n subfoldername_this_seq = f\"seq_{origIndex}\"\n outpath_this_seq = os.path.join(outpath_result, subfoldername_this_seq)\n\n try:\n myclient = myclientDict[node]\n except KeyError:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: node (%s) not found in myclientDict, ignore\"%(node), gen_logfile)\n keep_queueline_list.append(line)\n continue\n try:\n rtValue = myclient.service.checkjob(remote_jobid)\n except Exception as e:\n msg = \"checkjob(%s) at node %s failed with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue = []\n pass\n isSuccess = False\n isFinish_remote = False\n status = \"\"\n if len(rtValue) >= 1:\n ss2 = rtValue[0]\n if len(ss2) >= 3:\n status = 
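# Sketch, assuming the suds library (as the Client(..., cache=None, timeout=30) call
# above suggests): one SOAP client is built per unique node and reused for every
# queued sequence on that node; unreachable nodes are logged and skipped.
from suds.client import Client

def build_clients(nodes):
    clients = {}
    for node in nodes:
        wsdl_url = "http://%s/pred/api_submitseq/?wsdl" % node
        try:
            clients[node] = Client(wsdl_url, cache=None, timeout=30)
        except Exception as err:
            print("Failed to access %s: %s" % (wsdl_url, err))
    return clients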
ss2[0]\n result_url = ss2[1]\n errinfo = ss2[2]\n\n if errinfo and errinfo.find(\"does not exist\") != -1:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n msg = \"Failed for remote_jobid %s with errmsg %s\"%(remote_jobid, str(errinfo))\n webcom.loginfo(msg, gen_logfile)\n\n isFinish_remote = True\n\n if status == \"Finished\": # {{{\n isFinish_remote = True\n outfile_zip = f\"{tmpdir}/{remote_jobid}.zip\"\n isRetrieveSuccess = False\n myfunc.WriteFile(\"\\tFetching result for %s/seq_%d from %s \" % (\n jobid, origIndex, result_url), gen_logfile, \"a\", True)\n if myfunc.IsURLExist(result_url, timeout=5):\n try:\n myfunc.urlretrieve(result_url, outfile_zip, timeout=10)\n isRetrieveSuccess = True\n myfunc.WriteFile(f\" succeeded on node {node}\\n\", gen_logfile, \"a\", True)\n except Exception as e:\n myfunc.WriteFile(\" failed with %s\\n\"%(str(e)), gen_logfile, \"a\", True)\n pass\n if os.path.exists(outfile_zip) and isRetrieveSuccess:\n cmd = [\"unzip\", outfile_zip, \"-d\", tmpdir]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n rst_fetched = os.path.join(tmpdir, remote_jobid)\n if name_server.lower() == \"pconsc3\":\n rst_this_seq = rst_fetched\n elif name_server.lower() == \"boctopus2\":\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\", \"seq_0\")\n rst_this_seq_parent = os.path.join(rst_fetched, \"seq_0\")\n else:\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\")\n\n if os.path.islink(outpath_this_seq):\n os.unlink(outpath_this_seq)\n elif os.path.exists(outpath_this_seq):\n shutil.rmtree(outpath_this_seq)\n\n if os.path.exists(rst_this_seq) and not os.path.exists(outpath_this_seq):\n cmd = [\"mv\", \"-f\", rst_this_seq, outpath_this_seq]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n if name_server.lower() == \"boctopus2\":\n # move also seq.fa and time.txt for boctopus2\n file1 = os.path.join(rst_this_seq_parent, \"seq.fa\")\n file2 = os.path.join(rst_this_seq_parent, \"time.txt\")\n for f in [file1, file2]:\n if os.path.exists(f):\n try:\n shutil.move(f, outpath_this_seq)\n except:\n pass\n\n fafile_this_seq = os.path.join(outpath_this_seq, \"seq.fa\")\n if webcom.IsCheckPredictionPassed(outpath_this_seq, name_server):\n # relpace the seq.fa with original description\n myfunc.WriteFile('>%s\\n%s\\n'%(description, seq), fafile_this_seq, 'w', True)\n isSuccess = True\n\n if isSuccess:\n # delete the data on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n msg = \"Failed to deletejob(%s) on node %s with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue2 = []\n pass\n\n logmsg = \"\"\n if len(rtValue2) >= 1:\n ss2 = rtValue2[0]\n if len(ss2) >= 2:\n status = ss2[0]\n errmsg = ss2[1]\n if status == \"Succeeded\":\n logmsg = \"Successfully deleted data on %s \"\\\n \"for %s\"%(node, remote_jobid)\n else:\n logmsg = \"Failed to delete data on %s for \"\\\n \"%s\\nError message:\\n%s\\n\"%(node, remote_jobid, errmsg)\n else:\n logmsg = \"Failed to call deletejob %s via WSDL on %s\\n\"%(remote_jobid, node)\n\n # delete the downloaded temporary zip file and\n # extracted file\n if os.path.exists(outfile_zip):\n os.remove(outfile_zip)\n if os.path.exists(rst_fetched):\n shutil.rmtree(rst_fetched)\n\n # create or update the md5 cache\n if name_server.lower() == \"prodres\" and query_para != {}:\n md5_key = hashlib.md5((seq+str(query_para)).encode('utf-8')).hexdigest()\n else:\n md5_key = hashlib.md5(seq.encode('utf-8')).hexdigest()\n subfoldername = md5_key[:2]\n md5_subfolder = 
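# Sketch (placeholder paths): the result cache being built here keys each result on
# the md5 of the sequence (plus the query parameters for PRODRES) and stores the
# zipped result under <cache>/<first two hex chars>/<md5>.zip. The path derivation
# in isolation:
import hashlib
import os

def cache_zip_path(path_cache, seq, query_para=None):
    key_src = seq + (str(query_para) if query_para else "")
    md5_key = hashlib.md5(key_src.encode("utf-8")).hexdigest()
    return os.path.join(path_cache, md5_key[:2], md5_key + ".zip")

print(cache_zip_path("/cache", "MKTAYIAKQR"))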
\"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n\n # copy the zipped folder to the cache path\n origpath = os.getcwd()\n os.chdir(outpath_result)\n shutil.copytree(\"seq_%d\"%(origIndex), md5_key)\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)\n if not os.path.exists(md5_subfolder):\n os.makedirs(md5_subfolder)\n shutil.move(\"%s.zip\"%(md5_key), \"%s.zip\"%(cachedir))\n shutil.rmtree(md5_key) # delete the temp folder named as md5 hash\n os.chdir(origpath)\n\n # Add the finished date to the database\n date_str = time.strftime(g_params['FORMAT_DATETIME'])\n MAX_TRY_INSERT_DB = 3\n cnttry = 0\n while cnttry < MAX_TRY_INSERT_DB:\n t_rv = webcom.InsertFinishDateToDB(date_str, md5_key, seq, finished_date_db)\n if t_rv == 0:\n break\n cnttry += 1\n time.sleep(random.random()/1.0)\n\n# }}}\n elif status in [\"Failed\", \"None\"]:\n # the job is failed for this sequence, try to resubmit\n isFinish_remote = True\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s, status = %s\\n\"%(remote_jobid, status), gen_logfile)\n\n if status != \"Wait\" and not os.path.exists(starttagfile):\n webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)\n\n if isSuccess: # {{{\n time_now = time.time()\n runtime1 = time_now - submit_time_epoch # in seconds\n timefile = os.path.join(outpath_this_seq, \"time.txt\")\n runtime = webcom.ReadRuntimeFromFile(timefile, default_runtime=runtime1)\n info_finish = webcom.GetInfoFinish(\n name_server, outpath_this_seq,\n origIndex, len(seq), description,\n source_result=\"newrun\", runtime=runtime)\n finished_info_list.append(\"\\t\".join(info_finish))\n finished_idx_list.append(str(origIndex))\n # }}}\n\n # if the job is finished on the remote but the prediction is failed,\n # try resubmit a few times and if all failed, add the origIndex to the\n # failed_idx_file\n if isFinish_remote and not isSuccess:\n cnttry = 1\n try:\n cnttry = cntTryDict[int(origIndex)]\n except KeyError:\n cnttry = 1\n if cnttry < g_params['MAX_RESUBMIT']:\n resubmit_idx_list.append(str(origIndex))\n cntTryDict[int(origIndex)] = cnttry+1\n else:\n failed_idx_list.append(str(origIndex))\n\n if not isFinish_remote:\n time_in_remote_queue = time.time() - submit_time_epoch\n # for jobs queued in the remote queue more than one day (but not\n # running) delete it and try to resubmit it. 
This solved the\n # problem of dead jobs in the remote server due to server\n # rebooting)\n if (\n status != \"Running\"\n and status != \"\"\n and time_in_remote_queue > g_params['MAX_TIME_IN_REMOTE_QUEUE']):\n # delete the remote job on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n webcom.loginfo(\"Failed to run myclient.service.deletejob(%s) on node %s with msg %s\"%(remote_jobid, node, str(e)), gen_logfile)\n rtValue2 = []\n pass\n else:\n keep_queueline_list.append(line)\n# }}}\n # Finally, write log files\n finished_idx_list = list(set(finished_idx_list))\n failed_idx_list = list(set(failed_idx_list))\n resubmit_idx_list = list(set(resubmit_idx_list))\n\n if len(finished_info_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_info_list)+\"\\n\", finished_seq_file,\n \"a\", True)\n if len(finished_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_idx_list)+\"\\n\", finished_idx_file,\n \"a\", True)\n if len(failed_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(failed_idx_list)+\"\\n\", failed_idx_file, \"a\",\n True)\n if len(resubmit_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(resubmit_idx_list)+\"\\n\", torun_idx_file,\n \"a\", True)\n\n if len(keep_queueline_list) > 0:\n keep_queueline_list = list(set(keep_queueline_list))\n myfunc.WriteFile(\"\\n\".join(keep_queueline_list)+\"\\n\",\n remotequeue_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", remotequeue_idx_file, \"w\", True)\n\n with open(cnttry_idx_file, 'w') as fpout:\n json.dump(cntTryDict, fpout)\n\n return 0", "def prepare_submission(self):\n ## class Submit2Page\n if (self.form.has_key(\"pdbfile\") == False or \\\n self.form[\"pdbfile\"].file is None or \\\n self.form[\"pdbfile\"].value <= ' '):\n jobid = self.prepare_pdbid_entry()\n return jobid, False\n\n ## allocate a new JobID\n job_id = mysql.job_new()\n\n ## record user's IP address\n ip_addr = os.environ.get(\"REMOTE_ADDR\", \"Unknown\")\n mysql.job_set_remote_addr(job_id, ip_addr)\n\n ## read in all of the lines in the structure file\n infil = self.form[\"pdbfile\"].file\n line_list = []\n while True:\n ln = infil.readline()\n if not ln:\n break\n line_list.append(ln)\n\n ## proceed no further if there were not sufficient lines in uploaded\n ## structure file\n if len(line_list) < 10:\n webtlsmdd.remove_job(job_id)\n raise SubmissionException('Only Recieved %d lines of upload' % (\n len(line_list)))\n\n ## basic sanity checks (for non-via-pdb.org structures)\n run_mainchain_only = False\n r, tmpfile = check_upload(job_id, line_list, mainchain = False)\n if r != '':\n ## \"All atoms\" failed the sanity check. Let's try just the\n ## mainchain atoms.\n r, garbage = check_upload(job_id, line_list, mainchain = True)\n if r != '':\n ## No good. 
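# Sketch (illustrative names): in the GetResult() loop above, a per-index try counter
# (persisted as JSON in cntsubmittry_seqindex.txt) caps resubmissions; once it reaches
# MAX_RESUBMIT the sequence index goes to the failed list instead.
def classify_failed(orig_index, cnt_try_dict, max_resubmit):
    tries = cnt_try_dict.get(orig_index, 1)
    if tries < max_resubmit:
        cnt_try_dict[orig_index] = tries + 1
        return "resubmit"
    return "failed"

counts = {}
assert classify_failed(7, counts, 2) == "resubmit"
assert classify_failed(7, counts, 2) == "failed"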
The structure failed both sanity checks.\n ## Can not proceed with this structure.\n raise SubmissionException(str(r))\n else:\n run_mainchain_only = True\n\n ## TODO: Figure out how to do this without webtlsmdd, 2009-05-29\n ## pass the PDB file to the application server\n result = webtlsmdd.set_structure_file(job_id, xmlrpclib.Binary(\"\".join(line_list)))\n if result != \"\":\n raise SubmissionException(result)\n\n return job_id, run_mainchain_only", "def __init__(self, bdefilename=None):\n self.dbname = 'oeebde.db'\n self.recordcode = 'recordcode'\n self.nfilelines = 0\n if bdefilename is not None:\n self.bdefilename = bdefilename\n self.readfile(bdefilename)\n # options\n self.verbose = False\n # Variables related to sumup\n # The status code of a Sum-up to indicate whether it is currently happening\n self.sum_status = {'Preparation': 0, 'Production': 0, 'Maintenance': 0, 'Process': 0, 'W-up': 0, 'JobEnd': 0}\n # The results variables\n self.content = []\n self.sumups = {}\n self.output = {}\n self.errors = {}\n self.unsumed_lines = []\n # Constant to indicate whether a Sum-up is significant\n self.SUM_UNKNOWN = -1\n self.SUM_TRIVIAL = 0\n self.SUM_SIGNIFICANT = 1\n self.SUM_CONCATENATE = 2\n self.SUM_TRIVIAL_BUT_NEEDED = 3\n self.SUM_TRIVIAL_AND_SKIPPED = 4\n self.SUM_TRIVIAL_AND_CONCATENATE = 5\n self.SUM_TRIVIAL_AND_CONCATENATE_TO_NEXT = 6\n # significant duration is 5 min (convert to unit hour)\n self.SIG_DURATION = 5.0/60.0\n # significant Impreesion Count is 20\n self.SIG_IMPCOUNT = 20", "def some_job():\r\n\tfor row in rows:\r\n\t\treceipt_number = row[0]\r\n\t\tphone_number = row[2]\r\n\t\treturn case_status_check(receipt_number, phone_number)", "def run_job(self, job):\n try:\n rv = job.run()\n except BitcodeBuildFailure:\n # Catch and log an error\n env.error(u\"Failed to compile bundle: {}\".format(self.input))\n else:\n return rv", "def report_job_status(self, run_dir, jhash):\n run_dir_jhash, run_dir_status = self.run_dir_status(run_dir)\n if run_dir_jhash == jhash:\n return run_dir_status\n else:\n return runner_client.JobStatus.MISSING", "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def _parse_results(self):\n for line in self.file_dic['output'].splitlines():\n if line.startswith(' * GAMESS VERSION = '):\n temp = line.split('=')[1]\n temp = temp.split('*')[0]\n self.version = temp.strip()\n\n if line[1:25] == 'FREE ENERGY OF SOLVATION' and line.find('1 ATM') == -1:\n temp = line.split()\n #Take the next number after =\n #In KCAL/MOL\n self.solvation_energy = float(temp[temp.index(\"=\") + 1])", "def after_return(self, status, retval, task_id, args, kwargs, einfo):\n self.job_db.update_job_state(self.job_id, status.lower())\n if status == 'FAILURE':\n error_object = { 'job_id': self.job_id, 'job_name': 
self.name, 'message': self.error }\n self.job_db.add_job_error( self.job_id, error_object )\n\n if self.parent_job_id is not None:\n self._propagate_failure_to_ancestors(self.parent_job_id, error_object)\n self.job_db.close()", "def determine_exit_code(self) -> int:", "def check_entry(self, controller, entries, list_of_project_info, error_label):\r\n\r\n for x in range(0, len(entries)):\r\n if entries[x].get() == \"\":\r\n messagebox.showerror(\"Error\", \"Expected no empty fields\")\r\n return\r\n if not entries[2].get().isalpha():\r\n messagebox.showerror(\"Error\", \"Expected column in letter not number, e.g. 'B' \")\r\n return\r\n name_col = self.col_to_num(entries[2].get())\r\n self.write_to_indata(entries)\r\n\r\n list_error,error_present = [], []\r\n list_error = controller.start_config(entries, name_col, list_error, list_of_project_info)\r\n if len(list_error) == 0:\r\n message = \"Successfully generated all state files\"\r\n error_present.append(message)\r\n error_label.config(text=\"Successfully generated all state files\")\r\n else:\r\n for element in list_error:\r\n if element.error_type == \"1\": # error in loop_trough_row\r\n message = \"expected error in excel spreadsheet at row\" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"2\": #filname missing\r\n message = \"expected error in file \" + str(element.file_name)+ \"\\n\"\r\n elif element.error_type == \"3\": # Filename error\r\n message = \"expected error in file name at row \" + str(element.file_name) + \"\\n\"\r\n elif element.error_type == \"4\": # \"Seems like error in 1:st or 3:rd line in excel sheet\"\r\n message = \"expected error in excel spreadsheet on 1:st or 3:rd row \" + \"\\n\"\r\n error_present.append(message)\r\n error_report = open(\"error_report.txt\", \"w+\")\r\n error_report.write(''.join(error_present))\r\n error_report.close()\r\n error_label.config(text=\"Error occured, check error report in \"+ entries[1].get())\r\n # error_label.config(text=(''.join(error_present)))\r", "def findViolations(datafeed):\n \n jobs_map, fraud_jobs, ended_jobs = dict(), [], []\n \n for i, data in enumerate(datafeed):\n emp_name, value = data.split(';')\n value = value.split(',')\n \n if value[0] == 'START': \n # Create an entry for a new job \n parseStartLines(emp_name, jobs_map, i+1)\n else: \n # Updates the new employee with the end line and job ids\n fraud_jobs.extend(parseEndedJobs(jobs_map, value, emp_name, i+1, ended_jobs))\n\n return fraud_jobs", "def processValidationTests(scriptLogger,inputDirectory,outputDirectory,caseSourceType,dbHost,uniDBdbName,sourceDBName,dbuser,dbPassword,inputTestParameterSettingsXML):\n\n #Local Variables\n arrayXMLTestCaseFiles = []\n logger = scriptLogger\n\n # #Setup Test Logger\n # logFileName='validation_script_nt2osm.log'\n # logging.basicConfig(filename = logFileName,filemode='a',format = '%(levelname)-10s %(asctime)s || %(message)s',level = logging.DEBUG)\n # logger = logging.getLogger('nt2osm_validation')\n # beginTestMsg = 'Beginning Validation Test'\n # print beginTestMsg\n # logger.info(beginTestMsg)\n\n #Create Validation Test Report Instance\n validationReport = vreport.ValidationReportClass(outputDirectory)\n\n #Database Connection Parameters\n host = dbHost\n uniDBDatabase = uniDBdbName\n sourceDatabase = sourceDBName\n username = dbuser\n password = dbPassword\n\n #Setup uniDB Database Connection and Cursor\n uni_dbConnection = psycopg2.connect('dbname={dbname} host = {host} user={user} 
password={password}'.format(host=host,dbname=uniDBDatabase,user=username,password=password))\n uni_dbCursor = uni_dbConnection.cursor()\n\n #Setup source Database Connection and Cursor\n source_dbConnection = psycopg2.connect('dbname={dbname} host = {host} user={user} password={password}'.format(host=host,dbname=sourceDatabase,user=username,password=password))\n source_dbCursor = source_dbConnection.cursor()\n\n #Set Test Begin Time\n validationReport.setStartTime()\n\n #Validate All XML Case Lists\n if caseSourceType == 'xml' or caseSourceType == 'both':\n\n #Get List of XML Files from Input Directory.\n for root,dirs,files in os.walk(inputDirectory):\n for file in files:\n fileExtension = file.split('.')[1]\n xmlFileType = file.split('.')[0].split('_')[0]\n if fileExtension== 'xml' and xmlFileType == 'xmllist':\n arrayXMLTestCaseFiles.append(os.path.join(inputDirectory,file))\n\n for i in range(0,len(arrayXMLTestCaseFiles)):\n xmlFileName = arrayXMLTestCaseFiles[i]\n\n #Identify Test Name and Test Case List\n uniDBValidationTestName = XML2TC.xml2TestCaseAdapter.getTestNameFromXMLFile(xmlFileName)\n testCaseList = getUniDBTestCasesFromXML(xmlFileName,uni_dbConnection,uni_dbCursor)\n\n #Validate Test Case List\n testResults = validateTestCaseList(testCaseList,uniDBValidationTestName,outputDirectory,logger)\n\n #Add Validation Test Result to Validation Report\n validationReport.addTest(testResults)\n\n #Validate All CSV Case Lists\n if caseSourceType == 'db' or caseSourceType == 'both':\n csvDBCaseProcessor = SPCSV2TC.StoredProcCSV2TestCaseAdapter(inputTestParameterSettingsXML,inputDirectory,source_dbConnection,source_dbCursor)\n numberCSVTests = csvDBCaseProcessor.getNumberValidationTests()\n\n for i in range(0,numberCSVTests):\n #Identify Test Name and Test Case List\n uniDBValidationTestName = csvDBCaseProcessor.getTestNameFromTestParametersXMLFile(i)\n rawListCasesFromDB = csvDBCaseProcessor.getCasesFromSourceDB(i)\n validationTestMappingRules = csvDBCaseProcessor.getMappingRulesFromCSV(i)\n testCaseList = getUniDBTestCasesFromDB(csvDBCaseProcessor,rawListCasesFromDB,i,validationTestMappingRules,uni_dbConnection,uni_dbCursor)\n\n #Validate Test Case List\n testResults = validateTestCaseList(testCaseList,uniDBValidationTestName,outputDirectory,logger)\n\n #Add Validation Test Result to Validation Report\n validationReport.addTest(testResults)\n\n #Set Test End time\n validationReport.setEndTime()\n\n #Generate Validation Report\n reportOutput = validationReport.outputTestReport()\n\n #Last Validation Report Logger Information\n loggerMsg = 'Validation Script has completed!'\n print loggerMsg\n logger.info(loggerMsg)\n loggerMsg = 'All individual test results and the validation test summary saved to: {outputFilePath}'.format(outputFilePath=outputDirectory)\n print loggerMsg\n logger.info(loggerMsg)", "def get_error(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetError', self.handle))", "def test_failed_job(self):\n\n failed_job = json.loads(TREEHERDER_JOB % (\"testfailed\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(failed_job), FAILURE)", "def _get_error_info(self, result, log):\n _ = '/opt/l2deploy/logs/OverallStatusReport'\n f = self._remote_cmd(\"grep '{}' {}\".format(_, log))\n f = f.get('output').split('[')[-1][:-1]\n\n for n in [result] if self.nodes == 1 else result['nodes']:\n if 'failed' == n.get('status').lower():\n # 10th line in the detail report contains the required info\n c = \"grep -A 10 {} {}\".format(n.get('server'), f)\n c += \" 
| grep OS_Install_Status_Detail\"\n e = self._remote_cmd(c).get('output').split(':', 1)[1]\n LOG.info(\"{} failed due to {}\".format(n['server'], e))", "def job_run_status(job_name, run_status=-1, run_id=None):\r\n # If already started, update row status and end time.\r\n if run_id:\r\n sql = \"\"\"\r\n update dbo.Metadata_ETL_Job_History\r\n set job_status = {}, end_time = getdate()\r\n output inserted.etl_job_history_id\r\n where etl_job_history_id = {};\r\n \"\"\".format(run_status, run_id)\r\n # Add new row for the run.\r\n else:\r\n sql = \"\"\"\r\n insert into dbo.Metadata_ETL_Job_History(\r\n job_id, job_name, start_time, job_status\r\n )\r\n output inserted.etl_job_history_id\r\n select job_id, job_name, getdate(), -1\r\n from dbo.Metadata_ETL_Job where job_name = '{}';\r\n \"\"\".format(job_name)\r\n with pyodbc.connect(ETL_LOAD_A_ODBC_STRING) as conn:\r\n if run_status in (-1, 0, 1):\r\n with conn.execute(sql) as cursor:\r\n output = cursor.fetchall()\r\n if len(output) < 1:\r\n raise RuntimeError(\r\n \"No job named {} in job metadata table.\".format(job_name)\r\n )\r\n else:\r\n run_id = output[0][0]\r\n conn.commit()\r\n else:\r\n raise ValueError(\"Run status must be -1, 0, or 1.\")\r\n return run_id, run_status", "def check_backup(self):\n res = 0\n sql = '''select status\n FROM v$rman_backup_job_details\n WHERE start_time > SYSDATE - 1\n ORDER BY END_TIME '''\n self.cur.execute(sql)\n curres = self.cur.fetchall()\n rescount = (self.cur.rowcount)\n if rescount == 0:\n res = 99\n print(res)\n else:\n for i in curres:\n if re.search('FAILED|ERROR', i[0]):\n res = res + 1\n print(res)", "def run_dir_status(self, run_dir):\n disk_in_path = run_dir.join('in.json')\n disk_status_path = run_dir.join('status')\n if disk_in_path.exists() and disk_status_path.exists():\n # status should be recorded on disk XOR in memory\n assert run_dir not in self.report_jobs\n disk_in_text = pkio.read_text(disk_in_path)\n disk_jhash = pkjson.load_any(disk_in_text).reportParametersHash\n disk_status = pkio.read_text(disk_status_path)\n if disk_status == 'pending':\n # We never write this, so it must be stale, in which case\n # the job is no longer pending...\n pkdlog(\n 'found \"pending\" status, treating as \"error\" ({})',\n disk_status_path,\n )\n disk_status = runner_client.JobStatus.ERROR\n return disk_jhash, runner_client.JobStatus(disk_status)\n elif run_dir in self.report_jobs:\n job_info = self.report_jobs[run_dir]\n return job_info.jhash, job_info.status\n else:\n return None, runner_client.JobStatus.MISSING", "def analyze_results(self, results):\n ok_c = 0\n ko_c = 0\n for row in results:\n if \"opentsdb.health\" not in row[2] and \".health\" in row[2]:\n if row[4] == \"ERROR\":\n ko_c += 1\n else:\n ok_c += 1\n return ok_c, ko_c", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def _validate_jobs(\n self,\n check_nlst_warn: bool = False\n ):\n counter = 0\n for job in self.jobs:\n counter += 1\n print(job.job_id)\n if counter == 0:\n ignore_restarts = False\n else:\n ignore_restarts = True\n\n check_input_files(\n hrldas_namelist=job.hrldas_namelist,\n hydro_namelist=job.hydro_namelist,\n sim_dir=os.getcwd(),\n ignore_restarts=ignore_restarts,\n check_nlst_warn=check_nlst_warn\n )", "def run(self):\n success = False\n try:\n # Download file if a source_zip_url was given\n if self.source_zip_url:\n App.logger.debug(\"Linting url: \" + self.source_zip_url)\n self.download_archive()\n # unzip the input archive if a source_zip_file exists\n if self.source_zip_file:\n 
App.logger.debug(\"Linting zip: \" + self.source_zip_file)\n self.unzip_archive()\n # lint files\n if self.source_dir:\n self.rc = RC(directory=self.source_dir)\n App.logger.debug(\"Linting '{0}' files...\".format(self.source_dir))\n success = self.lint()\n App.logger.debug(\"...finished.\")\n except Exception as e:\n message = 'Linting process ended abnormally: {0}'.format(e.message)\n App.logger.error(message)\n self.log.warnings.append(message)\n App.logger.error('{0}: {1}'.format(str(e), traceback.format_exc()))\n result = {\n 'success': success,\n 'warnings': self.log.warnings,\n }\n App.logger.debug(\"Linter results: \" + str(result))\n return result", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']", "def testFWJRInputFileTruncation(self):\n\n self.config.JobStateMachine.maxFWJRInputFiles = 0\n change = ChangeState(self.config, \"changestate_t\")\n\n locationAction = self.daoFactory(classname=\"Locations.New\")\n locationAction.execute(\"site1\", pnn=\"T2_CH_CERN\")\n\n testWorkflow = Workflow(spec=self.specUrl, owner=\"Steve\",\n name=\"wf001\", task=self.taskName)\n testWorkflow.create()\n testFileset = Fileset(name=\"TestFileset\")\n testFileset.create()\n\n testFile = File(lfn=\"SomeLFNC\", locations=set([\"T2_CH_CERN\"]))\n testFile.create()\n testFileset.addFile(testFile)\n testFileset.commit()\n\n testSubscription = Subscription(fileset=testFileset,\n workflow=testWorkflow)\n testSubscription.create()\n\n splitter = SplitterFactory()\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=testSubscription)\n jobGroup = jobFactory(files_per_job=1)[0]\n\n self.assertEqual(len(jobGroup.jobs), 1,\n \"Error: Splitting should have created one job.\")\n\n testJobA = jobGroup.jobs[0]\n testJobA[\"user\"] = \"sfoulkes\"\n testJobA[\"group\"] = \"DMWM\"\n testJobA[\"taskType\"] = \"Processing\"\n\n change.propagate([testJobA], 'created', 'new')\n myReport = Report()\n reportPath = os.path.join(getTestBase(),\n \"WMCore_t/JobStateMachine_t/Report.pkl\")\n myReport.unpersist(reportPath)\n\n testJobA[\"fwjr\"] = myReport\n\n change.propagate([testJobA], 'executing', 'created')\n\n changeStateDB = self.couchServer.connectDatabase(dbname=\"changestate_t/fwjrs\")\n allDocs = changeStateDB.document(\"_all_docs\")\n\n self.assertEqual(len(allDocs[\"rows\"]), 2,\n \"Error: Wrong number of documents\")\n\n result = changeStateDB.loadView(\"FWJRDump\", \"fwjrsByWorkflowName\")\n self.assertEqual(len(result[\"rows\"]), 1,\n \"Error: Wrong number of rows.\")\n for row in result[\"rows\"]:\n couchJobDoc = changeStateDB.document(row[\"value\"][\"id\"])\n self.assertEqual(couchJobDoc[\"_rev\"], row[\"value\"][\"rev\"],\n \"Error: Rev is wrong.\")\n\n for resultRow in allDocs[\"rows\"]:\n if resultRow[\"id\"] != \"_design/FWJRDump\":\n fwjrDoc = changeStateDB.document(resultRow[\"id\"])\n break\n\n self.assertEqual(fwjrDoc[\"fwjr\"][\"steps\"]['cmsRun1']['input']['source'], [])\n\n return", "def __exit__(self, _type, value, traceback):\n try:\n if not self.record_finish:\n return\n print >>sys.stderr, 'record', self.record\n dutset = {'last_finish_time':time()}\n if not self.record:\n return\n 
upd = {'end_time': time(), 'modification_time':time()}\n\n if value: # i.e. , if test failed:\n upd['failure'] = repr(value)\n upd['exception'] = value.__class__.__name__\n if not isinstance(value, KeyboardInterrupt):\n print 'HEADLINE: exception', upd['exception'], value\n for clause in format_exception(_type, value, traceback):\n for line in clause.split('\\n'):\n print 'CRASH:', line\n else:\n upd['infrastructure_problem'] = True\n upd['whiteboard'] = '[infrastructure] test interrupted'\n if self.reinstall_on_failure:\n dutset['test_failed'] = True\n tnext = time() + 300\n print 'INFO: test failed, so will reinstall machine at', \\\n asctime(localtime(tnext))\n\n if self.failed: #some test suite failed\n upd['failure'] = 'test failed'\n\n self.mdb.results.update({'_id':self.result_id}, {'$set':upd})\n classify = process_result(self.mdb.results.find_one({'_id':self.result_id}))\n print 'HEADLINE:', classify, self.full_description()\n\n get_track().updates.save({'result_id':self.result_id,\n 'action':'experiment finished'})\n\n if self.dut_id:\n self.mdb.duts.update({'_id':self.dut_id}, \n {'$unset': {'control_pid':1, 'result_id':1,\n 'control_command_line':1},\n '$set': dutset})\n if self.build:\n recount(self.build)\n if classify == 'infrastructure_problems':\n pass\n else:\n col = 'green' if classify == 'passes' else 'red'\n finally:\n if self.record_queue:\n self.record_queue.put('finish')\n self.record_queue.close()\n self.record_queue.join_thread()\n if self.stream_process:\n self.stream_process.join()\n if self.stdout_filter:\n self.stdout_filter.del_callback(self)", "def _main(self):\n\n i = 0\n\n for i, step in enumerate(self._run_list):\n return_code = self._check_step_completed(i)\n\n if return_code == 0:\n logger.debug('Successful: \"{}\".'.format(list(step.keys())[0]))\n else:\n return_code = 1\n self._submit_step(i)\n\n # do not enter while loop for continuous monitoring\n if not self.monitor:\n break\n\n time.sleep(1)\n while return_code == 1 and self.monitor:\n time.sleep(5)\n return_code = self._check_step_completed(i)\n\n if return_code == 2:\n module, f_config = self._get_command_config(i)\n raise ExecutionError('Pipeline failed at step '\n '{} \"{}\" {}'\n .format(i, module, f_config))\n\n if i + 1 == len(self._run_list) and return_code == 0:\n logger.info('Pipeline job \"{}\" is complete.'\n .format(self._config.name))\n logger.debug('Output directory is: \"{}\"'\n .format(self._config.dirout))", "def test_results(self, affiliate_items):\n processed_count = 0\n error_count = 0\n\n updater = mock.Mock()\n batch_job = BatchJob(affiliate_items, updater)\n\n for result in batch_job.run():\n processed_count += 1\n error_count += int(result.is_error)\n\n assert updater.call_count == 4\n assert processed_count == 4\n assert error_count == 0", "def validate_fields(row,fila,new_values):\n # \"Definiciones iniciales del diccionario de errores\n # y status de error.\n dict_error={}\n error_count = False\n\n \"\"\"Validaciones de cada campo segun el modelo Job.\"\"\"\n #Validate Job.id\n if row[0] == None:\n new_values['id'] =row[0]\n # dict_error['id'] = ''\n else:\n try:\n if isinstance(int(row[0]),int):\n new_values['id'] =row[0]\n # dict_error['id'] = ''\n except ValueError:\n error_count=True\n dict_error['id'] = ValueError('Error en id')\n\n #Validate Job.company_ruc\n if len(row[1])==11:\n new_values['company_ruc'] =row[1]\n # dict_error['company_ruc'] = ''\n else:\n error_count=True\n dict_error['company_ruc'] = ValidationError('Error en company_ruc')\n\n #Validate 
Job.company_name\n if len(row[2])<=100 and row[2] != None:\n new_values['company_name'] =row[2]\n # dict_error['company_name'] = ''\n else:\n error_count=True\n dict_error['company_name'] = ValidationError('Error en company_name')\n\n #Validate Job.title\n if len(row[3])<=100 and row[3] != None:\n new_values['title'] =row[3]\n # dict_error['title'] = ''\n else:\n error_count=True\n dict_error['title'] = ValidationError('Error en title')\n\n #Validate Job.description\n if isinstance(row[4],str) and row[4] != None:\n new_values['description'] =row[4]\n # dict_error['description'] = ''\n else:\n error_count=True\n dict_error['description'] = ValidationError('Error en description')\n\n #Validate Job.requeriments\n if isinstance(row[5],str) and row[5] != None:\n new_values['requeriments'] =row[5]\n # dict_error['requeriments'] = ''\n else:\n error_count=True\n dict_error['requeriments'] = ValidationError('Error en requeriments')\n\n #Validate Job.contact_email\n if row[6] != None:\n new_values['contact_email'] =row[6]\n # dict_error['contact_email'] = ''\n else:\n error_count=True\n dict_error['contact_email'] = ValidationError('Error en contact email')\n\n #Validate Job.location\n if len(row[7])<=50 and row[7] != None:\n new_values['location'] =row[7]\n # dict_error['location'] = ''\n else:\n error_count=True\n dict_error['location'] = ValidationError('Error en Location')\n\n #Validate Job.is_active\n if row[8] == None:\n new_values['is_active'] =row[8]\n # dict_error['is_active'] = ''\n else:\n try:\n if int(row[8])==1 or int(row[8])==0:\n new_values['is_active'] =row[8]\n # dict_error['is_active'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_active'] = ValidationError('Error en el is_active')\n\n #Validate Job.is_verified\n if row[9] == None:\n new_values['is_verified'] =row[9]\n # dict_error['is_verified'] = ''\n else:\n try:\n if int(row[9])==1 or int(row[9])==0:\n new_values['is_verified'] =row[9]\n # dict_error['is_verified'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_verified'] = ValidationError('Error en el is_verified')\n\n #Validate Job.is_public\n if row[10] == None:\n new_values['is_public'] =row[10]\n # dict_error['is_public'] = ''\n else:\n try:\n if int(row[10])==1 or int(row[10])==0:\n new_values['is_public'] =row[10]\n # dict_error['is_public'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_public'] = ValidationError('Error en el is_public')\n\n #Validate Job.show_recruiter\n if row[11] == None:\n new_values['show_recruiter'] =row[11]\n # dict_error['show_recruiter'] = ''\n else:\n try:\n if int(row[11])==1 or int(row[11])==0:\n new_values['show_recruiter'] =row[11]\n # dict_error['show_recruiter'] = ''\n except Exception as e:\n error_count=True\n dict_error['show_recruiter'] = ValidationError('Error en el show_recruiter')\n\n #Validate Job.website_url\n if row[12] == None:\n new_values['website_url'] =row[12]\n # dict_error['website_url'] = ''\n else:\n if len(row[12])>4:\n new_values['website_url'] =row[12]\n # dict_error['website_url'] = ''\n else:\n error_count=True\n dict_error['website_url'] = ValidationError('Error en website_url')\n\n #Validate Job.benefits\n if isinstance(row[13],str) or row[13] == None:\n new_values['benefits'] =row[13]\n # dict_error['benefits'] = ''\n else:\n error_count=True\n dict_error['benefits'] = ValidationError('Error en benefits')\n\n #Validate Job.urgency\n if isinstance(row[14],str) or row[14] == None:\n new_values['urgency'] =row[14]\n # dict_error['urgency'] = ''\n else:\n 
error_count=True\n dict_error['urgency'] = ValidationError('Error en urgency')\n\n #Validate Job.schedule\n if isinstance(row[15],str) or row[15] == None:\n new_values['work_schedule'] =row[15]\n # dict_error['work_schedule'] = ''\n else:\n error_count=True\n dict_error['work_schedule'] = ValidationError('Error en work_schedule')\n\n #Validate Job.comment\n if isinstance(row[16],str) or row[16] == None:\n new_values['comment'] =row[16]\n # dict_error['comment'] = ''\n else:\n error_count=True\n dict_error['comment'] = ValidationError('Error en comment')\n\n #Validate Job.min_salary\n if isinstance(int(row[17]),int) or row[17] == None:\n new_values['min_salary'] =row[17]\n # dict_error['min_salary'] = ''\n else:\n error_count=True\n dict_error['min_salary'] = ValidationError('Error en min_salary')\n\n #Validate Job.max_salary\n if isinstance(int(row[18]),int) or row[18] == None:\n new_values['max_salary'] =row[18]\n # dict_error['max_salary'] = ''\n else:\n error_count=True\n dict_error['max_salary'] = ValidationError('Error en max_salary')\n\n #Validate Job.pay_range_period\n if row[19]=='annual' or row[19]=='monthly' or row[19] == None:\n new_values['pay_range_period'] =row[19]\n # dict_error['pay_range_period'] = ''\n else:\n error_count=True\n dict_error['pay_range_period'] = ValidationError('Error en pay_range_period')\n\n return {\"errors\": dict_error,\"fila\":fila,\"new_values\":new_values,\"error_status\":error_count}", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def get_problem_report(self):\n\t\treturn Job(SDK.PrlSrv_GetProblemReport(self.handle)[0])", "def process(self, conn):\n batch = msgpack.unpackb(self._request(conn), raw=False)\n ids = list(batch.keys())\n self.logger.debug(f'Received job ids: {ids}')\n\n # validate request\n validated = []\n errors = []\n for i, byte in enumerate(batch.values()):\n try:\n data = self._unpack(byte)\n obj = self.req_schema.parse_obj(data)\n validated.append(obj)\n self.logger.debug(f'{obj} passes the validation')\n except ValidationError as err:\n errors.append((i, self._pack(err.errors())))\n self.logger.info(\n f'Job {ids[i]} validation error',\n extra={'Validation': err.errors()}\n )\n except (json.JSONDecodeError,\n msgpack.ExtraData, msgpack.FormatError, msgpack.StackError) as err:\n errors.append((i, self._pack(str(err))))\n self.logger.info(f'Job {ids[i]} error: {err}')\n\n # inference\n self.logger.debug(f'Validated: {validated}, Errors: {errors}')\n result = []\n if validated:\n result = self.infer(validated)\n assert len(result) == len(validated), (\n 'Wrong number of inference results. 
'\n f'Expcet {len(validated)}, get{len(result)}.'\n )\n\n # validate response\n for data in result:\n self.resp_schema.parse_obj(data)\n\n # add errors information\n err_ids = ''\n result = [self._pack(data) for data in result]\n for index, err_msg in errors:\n err_ids += ids[index]\n result.insert(index, err_msg)\n\n # build batch job table\n resp = dict(zip(ids, result))\n if err_ids:\n resp['error_ids'] = err_ids\n self._response(conn, resp)", "def test_get_refresh_job_status(self):\n pass", "def handle_delf_init(self, job):\n\n # Check which, if any, servers the file exists on\n self.put_job_in_all_queues(job)\n list_job_results = self.get_internal_results_from_all_servers()\n if len(list_job_results) == 0:\n # There were no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n return_result = copy.deepcopy(list_job_results[0])\n return_result.result[\"file_exists\"] = False\n\n for result in list_job_results:\n if result.result[\"file_exists\"]:\n return_result.result[\"file_exists\"] = True\n\n return_result.result[\"outcome\"] = \"success\"\n return_result.processed_by = None\n\n self.put_external_result(return_result)", "def do_work(parser) -> dict:\n result = {}\n\n parser.add_argument('--logging', '-l', nargs='?', default=os.getenv(\"LOGGING\"),\n help='file or url or logging configuration (default=None)')\n\n parser.add_argument('--debug', '-d', action='store_const',\n default=logging.WARN, const=logging.DEBUG,\n help='enable debug logging (default=WARN)')\n\n parser.add_argument('--info', '-i', action='store_const',\n default=logging.WARN, const=logging.INFO,\n help='enable info logging (default=WARN)')\n\n parser.add_argument('--options', '-o', default=None,\n help='optional comma separated list of name/value pairs to pass (eg: \"name=value,name2=value2\")')\n\n parser.add_argument('datatype', type=str, help='type of data to retrieve: cultivars, experiments, sites, traits')\n parser.add_argument('date', type=str, nargs='?',help='date needed by the \"sites\" datatype parameter in YYYY-MM-DD format')\n\n args = parser.parse_args()\n\n # start logging system\n do_setup_logging(args.logging)\n logging.getLogger().setLevel(args.debug if args.debug == logging.DEBUG else args.info)\n\n if args.datatype == 'sites' and not args.date:\n result['error'] = \"A date must be specified with the datatype parameter of \\\"sites\\\"\"\n result['code'] = -1\n logging.error(result['error'])\n logging.error(\" Stopping processing\")\n return result['code']\n\n os.environ['BETYDB_URL'] = BETYDB_URL\n os.environ['BETYDB_KEY'] = BETYDB_KEY\n logging.debug(\"Calling BETYdb at location: %s\", BETYDB_URL)\n\n opts = {}\n if args.options:\n options = args.options.split(',')\n for one_option in options:\n if '=' in one_option:\n opt_name, opt_value = one_option.split('=')\n if opt_name:\n opts[opt_name] = opt_value\n else:\n opts[one_option] = ''\n if opts:\n logging.debug(\"Calling BETYdb with options: %s\", str(opts))\n\n type_map = {\n 'cultivars': lambda: do_get_cultivars(limit='none', **opts),\n 'experiments': lambda: do_get_experiments(associations_mode='full_info', limit='none', **opts),\n 'sites': lambda: do_get_sites(args.date, **opts),\n 'traits': lambda: do_get_traits(limit='none', **opts),\n }\n\n if args.datatype in type_map:\n data = type_map[args.datatype]()\n print(json.dumps(data, indent=2))\n else:\n result['error'] = \"Invalid datatype parameter specifed: '%s'. 
Stopping processing\" % str(args.datatype)\n result['code'] = -2\n\n if 'error' in result:\n logging.error(result['error'])\n logging.error(\" Stopping processing\")\n if 'warning' in result:\n logging.warning(result['warning'])\n\n return result['code'] if 'code' in result else 0", "def __checkAndRemoveFinished(self, running):\n with self.__queueLock:\n returnCode = running.getReturnCode()\n if returnCode != 0:\n metadataFailedRun = running.getMetadata()\n metadataToKeep = metadataFailedRun\n if metadataFailedRun is not None:\n metadataKeys = list(metadataFailedRun.keys())\n if 'jobHandler' in metadataKeys:\n metadataKeys.pop(metadataKeys.index(\"jobHandler\"))\n metadataToKeep = { keepKey: metadataFailedRun[keepKey] for keepKey in metadataKeys }\n # FIXME: The running.command was always internal now, so I removed it.\n # We should probably find a way to give more pertinent information.\n self.raiseAMessage(f\" Process Failed {running.identifier}:{running} internal returnCode {returnCode}\")\n self.__failedJobs[running.identifier]=(returnCode,copy.deepcopy(metadataToKeep))", "def test_get_job_executions(self):\n url = '/%s/jobs/%d/executions/' % (self.api, self.job_1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n job_exe_count = results['count']\n self.assertEqual(job_exe_count, 4)\n #check that we order by descending exe_num\n self.assertEqual(results['results'][0]['exe_num'], 4)", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def qstat_job_details(jid, state=None, queue_name=None):\n command = '%s -j %d -xml' % (QSTAT_PATH, jid)\n result_xml = subprocess.check_output([command], env=ENV, shell=True)\n root_element = xml.etree.ElementTree.fromstring(result_xml)\n job_info_element = root_element[0][0]\n job_mail_list = job_info_element.find('JB_mail_list')[0]\n stdout_path_list = job_info_element.find('JB_stdout_path_list')\n stderr_path_list = job_info_element.find('JB_stderr_path_list')\n hard_queue_list = job_info_element.find('JB_hard_queue_list')\n if hard_queue_list is not None:\n destination_ident_list = hard_queue_list.find('destin_ident_list')\n qr_name = destination_ident_list[0]\n else:\n qr_name = None\n predecessors = []\n predecessor_list = job_info_element.find('JB_jid_predecessor_list')\n if predecessor_list is not None:\n job_predecessors = predecessor_list.find('job_predecessors')\n if job_predecessors is not None:\n for predecessor in job_predecessors:\n predecessors.append(int(predecessor.text))\n job_details = {\n 'job_id': int(job_info_element.find('JB_job_number').text),\n 'owner': job_info_element.find('JB_owner').text,\n 'name': job_info_element.find('JB_job_name').text,\n 'executable': job_info_element.find('JB_script_file').text,\n 'qr_name': qr_name.text if qr_name is not None else '',\n 'predecessors': predecessors,\n 'stdout_path': _text_or_none(stdout_path_list[0], 'PN_path') if stdout_path_list else '',\n 'stderr_path': _text_or_none(stderr_path_list[0], 'PN_path') if stderr_path_list else '',\n 'priority': job_info_element.find('JB_priority').text,\n 'submission_timestamp': job_info_element.find('JB_submission_time').text\n }\n if state:\n job_details['state'] = state\n if queue_name:\n job_details['queue_name'] = queue_name\n # Get job args\n job_args = []\n job_arg_list = 
job_info_element.find('JB_job_args')\n if job_arg_list:\n for e in job_arg_list:\n job_args.append(e[0].text)\n job_details['job_args'] = job_args\n # Get environment\n env = {}\n job_env_list = job_info_element.find('JB_env_list')\n for e in job_env_list:\n variable_name = e[0].text\n if len(e) > 1:\n variable_value = e[1].text\n else:\n variable_value = ''\n env[variable_name] = variable_value\n job_details['env'] = env\n return job_details", "def exitCode(self):\n return self._ExitCode", "def process_resp(self, msg, operation, status, index):\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, operation)\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, status))\n if status == \"0\":\n self.cause.extend(msg)\n metric = \"%s.%d.%s\" % (METRIC_NAME, index, \"health\")\n analyse_status = MonitorStatus[\"red\"]\n self.results.append(Event(TIMESTAMP_MILLIS(), \"opentsdb\", metric, msg, analyse_status))", "def execute_handler_action(self):\n try:\n # fetch seq_no\n self.seq_no = self.ext_config_settings_handler.get_seq_no(is_enable_request=True)\n if self.seq_no is None:\n self.logger.log_error(\"Sequence number for current operation not found\")\n exit(Constants.ExitCode.ConfigurationError)\n\n # read status file, to load any preserve existing context\n self.ext_output_status_handler.read_file(self.seq_no)\n\n config_settings = self.ext_config_settings_handler.read_file(self.seq_no)\n\n # set activity_id in telemetry\n if self.telemetry_writer is not None:\n self.telemetry_writer.set_operation_id(config_settings.__getattribute__(self.config_public_settings.activity_id))\n\n operation = config_settings.__getattribute__(self.config_public_settings.operation)\n\n # Allow only certain operations\n if operation not in [Constants.NOOPERATION, Constants.ASSESSMENT, Constants.INSTALLATION, Constants.CONFIGURE_PATCHING]:\n self.logger.log_error(\"Requested operation is not supported by the extension\")\n self.ext_output_status_handler.write_status_file(operation, self.seq_no, status=Constants.Status.Error.lower(), message=\"Requested operation {0} is not supported by the extension\".format(str(operation)), code=Constants.ExitCode.OperationNotSupported)\n exit(Constants.ExitCode.OperationNotSupported)\n\n prev_patch_max_end_time = self.cmd_exec_start_time + datetime.timedelta(hours=0, minutes=Constants.ENABLE_MAX_RUNTIME)\n self.ext_state_handler.create_file(self.seq_no, operation, prev_patch_max_end_time)\n core_state_content = self.core_state_handler.read_file()\n\n # log tmp folder size\n self.ext_env_handler.log_temp_folder_details()\n\n # if NoOperation is requested, terminate all running processes from previous operation and update status file\n if operation == Constants.NOOPERATION:\n self.process_nooperation(config_settings, core_state_content)\n else:\n # if any of the other operations are requested, verify if request is a new request or a re-enable, by comparing sequence number from the prev request and current one\n if core_state_content is None or core_state_content.__getattribute__(self.core_state_fields.number) is None:\n # first patch request for the VM\n self.logger.log(\"No state information was found for any previous patch operation. 
Launching a new patch operation.\")\n self.launch_new_process(config_settings, create_status_output_file=True)\n else:\n if int(core_state_content.__getattribute__(self.core_state_fields.number)) != int(self.seq_no):\n # new request\n self.process_enable_request(config_settings, prev_patch_max_end_time, core_state_content)\n else:\n # re-enable request\n self.process_reenable_request(config_settings, core_state_content)\n\n except Exception as error:\n self.logger.log_error(\"Failed to execute enable. [Exception={0}]\".format(repr(error)))\n raise", "async def set_job_status(job_id: str) -> int:\n async with js.WDBConnection() as conn:\n async with conn.cursor() as cur:\n num_rows_affected = await cur.execute(FINALIZE_SUBMISSION_SQL, (job_id,))\n await conn.commit()\n return num_rows_affected", "def test_results_errors(self, affiliate_items):\n updater = mock.Mock(side_effect=ValueError())\n batch_job = BatchJob(affiliate_items, updater)\n\n error_count = 0\n for result in batch_job.run():\n error_count += int(result.is_error)\n\n assert error_count == 4", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def post_wrapper(wcl, ins, jobfiles, logfile, exitcode, workdir):\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n #logfile = None\n # Save disk usage for wrapper execution\n disku = 0\n if workdir is not None:\n disku = pfwutils.diskusage(os.getcwd())\n\n # outputwcl and log are softlinks skipped by diskusage command\n # so add them individually\n if os.path.exists(wcl[pfwdefs.IW_WRAPSECT]['outputwcl']):\n disku += os.path.getsize(wcl[pfwdefs.IW_WRAPSECT]['outputwcl'])\n if os.path.exists(logfile):\n disku += os.path.getsize(logfile)\n else:\n disku = pfwutils.diskusage(wcl['jobroot'])\n wcl['wrap_usage'] = disku - wcl['pre_disk_usage']\n\n # don't save logfile name if none was actually written\n if not os.path.isfile(logfile):\n logfile = None\n\n outputwclfile = wcl[pfwdefs.IW_WRAPSECT]['outputwcl']\n if not os.path.exists(outputwclfile):\n outputwclfile = None\n\n filemgmt = dynam_load_filemgmt(wcl, None)\n\n finfo = {}\n\n excepts = []\n\n # always try to save log file\n logfinfo = save_log_file(filemgmt, wcl, jobfiles, logfile)\n if logfinfo is not None and logfinfo:\n finfo.update(logfinfo)\n\n outputwcl = WCL()\n if outputwclfile and os.path.exists(outputwclfile):\n with open(outputwclfile, 'r') as outwclfh:\n outputwcl.read(outwclfh, filename=outputwclfile)\n\n # add wcl file to list of non-junk output files\n jobfiles['outfullnames'].append(outputwclfile)\n\n # if running in a fw thread\n if workdir is not None:\n\n # undo symbolic links to input files\n for sect in ins:\n for fname in ins[sect]:\n os.unlink(fname)\n\n #jobroot = os.getcwd()[:os.getcwd().find(workdir)]\n jobroot = wcl['jobroot']\n\n # move any output files from fw thread working dir to job scratch dir\n if outputwcl is not None and outputwcl and \\\n pfwdefs.OW_OUTPUTS_BY_SECT in outputwcl and \\\n outputwcl[pfwdefs.OW_OUTPUTS_BY_SECT]:\n for byexec in outputwcl[pfwdefs.OW_OUTPUTS_BY_SECT].values():\n for elist in byexec.values():\n files = miscutils.fwsplit(elist, ',')\n for _file in files:\n subdir = os.path.dirname(_file)\n if subdir != \"\":\n newdir = os.path.join(jobroot, subdir)\n 
miscutils.coremakedirs(newdir)\n\n # move file from fw thread working dir to job scratch dir\n shutil.move(_file, os.path.join(jobroot, _file))\n\n # undo symbolic links to log and outputwcl dirs\n os.unlink('log')\n os.unlink('outputwcl')\n os.unlink('inputwcl')\n if os.path.exists('list'):\n os.unlink('list')\n\n os.chdir(jobroot) # change back to job scratch directory from fw thread working dir\n cleanup_dir(workdir, True)\n\n # handle output files - file metadata, prov, copying to archive\n if outputwcl is not None and outputwcl:\n pfw_hdrupd = get_pfw_hdrupd(wcl)\n execs = intgmisc.get_exec_sections(outputwcl, pfwdefs.OW_EXECPREFIX)\n for sect in execs:\n print \"DESDMTIME: app_exec %s %0.3f\" % (sect, float(outputwcl[sect]['walltime']))\n\n if pfwdefs.OW_OUTPUTS_BY_SECT in outputwcl and \\\n outputwcl[pfwdefs.OW_OUTPUTS_BY_SECT]:\n badfiles = []\n wrap_output_files = []\n for sectname, byexec in outputwcl[pfwdefs.OW_OUTPUTS_BY_SECT].items():\n sectkeys = sectname.split('.')\n sectdict = wcl.get('%s.%s' % (pfwdefs.IW_FILESECT, sectkeys[-1]))\n filesave = miscutils.checkTrue(pfwdefs.SAVE_FILE_ARCHIVE, sectdict, True)\n filecompress = miscutils.checkTrue(pfwdefs.COMPRESS_FILES, sectdict, False)\n\n updatedef = {}\n # get any hdrupd secton from inputwcl\n for key, val in sectdict.items():\n if key.startswith('hdrupd'):\n updatedef[key] = val\n\n # add pfw hdrupd values\n updatedef['hdrupd_pfw'] = pfw_hdrupd\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"sectname %s, updatedef=%s\" % \\\n (sectname, updatedef))\n\n for _, elist in byexec.items():\n fullnames = miscutils.fwsplit(elist, ',')\n wrap_output_files.extend(fullnames)\n filepat = None\n if 'filepat' in sectdict:\n if sectdict['filepat'] in wcl['filename_pattern']:\n filepat = wcl['filename_pattern'][sectdict['filepat']]\n else:\n raise KeyError('Missing file pattern (%s, %s, %s)' % (sectname,\n sectdict['filetype'],\n sectdict['filepat']))\n try:\n badfiles.extend(pfw_save_file_info(filemgmt, sectdict['filetype'],\n fullnames, True, updatedef, filepat))\n except Exception, e:\n miscutils.fwdebug_print('An error occurred')\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n excepts.append(e)\n for fname in fullnames:\n if fname in badfiles:\n continue\n finfo[fname] = {'sectname': sectname,\n 'filetype': sectdict['filetype'],\n 'filesave': filesave,\n 'filecompress': filecompress,\n 'fullname': fname}\n if 'archivepath' in sectdict:\n finfo[fname]['path'] = sectdict['archivepath']\n\n wrap_output_files = list(set(wrap_output_files))\n if badfiles:\n miscutils.fwdebug_print(\"An error occured during metadata ingestion the following file(s) had issues: %s\" % \\\n', '.join(badfiles))\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n\n excepts.append(Exception(\"An error occured during metadata ingestion the following file(s) had issues: %s\" % ', '.join(badfiles)))\n for f in badfiles:\n if f in wrap_output_files:\n wrap_output_files.remove(f)\n\n jobfiles['outfullnames'].extend(wrap_output_files)\n # update input files\n for isect in ins:\n for ifile in ins[isect]:\n jobfiles['infullnames'].append(ifile)\n\n if finfo:\n save_trans_end_of_job(wcl, jobfiles, finfo)\n copy_output_to_archive(wcl, jobfiles, finfo, 'wrapper', 'wrapper_output', exitcode)\n\n # clean up any input files no longer needed - TODO\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n 
miscutils.fwdebug_print(\"END\\n\\n\")\n if excepts:\n raise Exception('An exception was raised. See tracebacks further up the output for information.')", "def test_check_bundle_7(self):\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb_x\",\n retain_ref=\"mysql\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 6)\n with self.subTest():\n self.assertEqual(count, 1)", "def test_successful_execution(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['execution']['job']['id'], self.job.id)\n self.assertEqual(result['execution']['job_type']['id'], self.job_type.id)\n self.assertEqual(result['execution']['exe_num'], self.job_exe.exe_num)", "def errdump_analysis(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, project_constants_lst):\n \n # imported project constants required for module execution\n project_steps_df, max_title, io_data_names_df, _, report_headers_df, report_columns_usage_sr, *_ = project_constants_lst\n\n # data titles obtained after module execution (output data)\n # data titles which module is dependent on (input data)\n data_names, analyzed_data_names = dfop.list_from_dataframe(io_data_names_df, 'errorlog_analysis_out', 'errorlog_analysis_in')\n # module information\n meop.show_module_info(project_steps_df, data_names)\n # read data from database if they were saved on previos program execution iteration\n data_lst = dbop.read_database(project_constants_lst, *data_names)\n \n # force run when any output data from data_lst is not found in database or \n # procedure execution explicitly requested (force_run flag is on) for any output or input data \n force_run = meop.verify_force_run(data_names, data_lst, project_steps_df, \n max_title, analyzed_data_names)\n if force_run:\n # data imported from init file (regular expression patterns) to extract values from data columns\n pattern_dct, _ = sfop.regex_pattern_import('raslog_split', max_title)\n raslog_message_details_df = sfop.dataframe_import('raslog_details', max_title)\n raslog_message_id_details_df = sfop.dataframe_import('raslog_id_details', max_title, columns=['Message_ID', 'Details', 'Recommended_action'])\n\n # current operation information string\n info = f'Counting RASLog messages'\n print(info, end =\" \")\n\n # get aggregated DataFrames\n errdump_aggregated_df = errdump_aggregated(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, pattern_dct)\n # count how many times event appears during one month for the last six months \n raslog_counter_df, raslog_frequent_df = errdump_statistics(errdump_aggregated_df, raslog_message_details_df, raslog_message_id_details_df)\n # after finish display status\n meop.status_info('ok', max_title, len(info)) \n # partition aggregated DataFrame to required tables\n raslog_report_df = raslog_report(raslog_frequent_df, data_names, report_headers_df, report_columns_usage_sr)\n\n # create list with partitioned DataFrames\n data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]\n # writing data to sql\n dbop.write_database(project_constants_lst, data_names, *data_lst)\n # verify if loaded data is empty and replace information string with empty DataFrame\n else:\n data_lst 
= dbop.verify_read_data(max_title, data_names, *data_lst)\n errdump_aggregated_df, raslog_counter_df, *_ = data_lst\n # save data to service file if it's required\n for data_name, data_frame in zip(data_names, data_lst):\n report.dataframe_to_excel(data_frame, data_name, project_constants_lst)\n return errdump_aggregated_df, raslog_counter_df", "def test_successful_job(self):\n\n successful_job = json.loads(TREEHERDER_JOB % (\"success\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def set_test_property_values(self):\n self.set_single_value(self._ok_wrapper.entry,\n jwrap._JOB_ID, EXPECTED_ID)\n self.set_single_value(self._request_wrapper.entry,\n jwrap._JOB_GROUP_NAME,\n EXPECTED_GROUP_NAME)\n self.set_single_value(self._request_wrapper.entry,\n jwrap._JOB_OPERATION_NAME,\n EXPECTED_OPERATION_NAME)\n self.set_single_value(self._failed_wrapper.entry,\n jwrap._JOB_STATUS,\n EXPECTED_STATUS)\n self.set_single_value(self._exception_wrapper.entry,\n jwrap._JOB_MESSAGE,\n EXPECTED_EXCEPTION_MESSAGE)\n # results value containing the message is the second one in a list\n props = self._failed_wrapper.entry.element.findall(\n jwrap._JOB_RESULTS_VALUE)\n props[1].text = str(EXPECTED_RESULTS_VALUE)", "def get_returncode(self, *arguments, **kwargs):\n kwargs.update(\n dict(stdout=None,\n stderr=None,\n loglevel=logging.INFO))\n kwargs.setdefault('env', self.env)\n command_result = get_command_result(\n self.git_command, *arguments, **kwargs)\n return command_result.returncode", "def run(self):\n\n try:\n self.send_alert('SUCCESS', \"Job START\", None)\n self.cron_process()\n self.send_alert('SUCCESS', \"Job FINAL\", None)\n sys.exit(0)\n\n except Exception:\n alert_data = self.build_error_output()\n self.send_alert('FAILURE', 'Failure during Job', alert_data)\n sys.exit(1)", "def get_search_status(self, args=None):\r\n result = {'Task': \"GetSearchStatus\", \"Status\": \"complete\", \"Error\": \"NoError\", \"JobProgress\": '0',\r\n \"DataSources\": [], \"TotalBytes\": 0, \"JobID\": args}\r\n\r\n matching_data = 0\r\n keys = []\r\n values = []\r\n id_to_key_dict = dict()\r\n app_dict = dict()\r\n\r\n with EndaceWebSession(app_url=self.applianceurl, username=self.username, password=self.password,\r\n cert_verify=self.cert_verify) as sess:\r\n api = EndaceVisionAPIAdapter(sess)\r\n path = \"files\"\r\n rd = api.get(path)\r\n if rd.status_code == 200:\r\n path = \"queries/\" + args\r\n progress_status = True\r\n query_time = calendar.timegm(time.gmtime())\r\n while progress_status:\r\n # progress loop\r\n # exit when whichever occurrs before, search timeout or search progress = 100% or search returns an\r\n # unknown value\r\n current_time = calendar.timegm(time.gmtime())\r\n if current_time - query_time > self.delta_time:\r\n progress_status = False\r\n result['Status'] = \"InProgress\"\r\n result['Error'] = \"SearchTimeOut\"\r\n else:\r\n rj = api.get(path)\r\n if rj.status_code == 200:\r\n # Check metadata for no error.\r\n try:\r\n response = rj.json()\r\n except json.decoder.JSONDecodeError:\r\n raise Exception(f\"JsonDecodeError - path {path}\")\r\n else:\r\n meta = response.get(\"meta\", {})\r\n if meta:\r\n meta_error = meta.get(\"error\")\r\n if meta_error is not None:\r\n if meta_error is not False:\r\n progress_status = False\r\n result['Status'] = \"complete\"\r\n result['Error'] = str(meta_error)\r\n else:\r\n # check payload for no error\r\n payload = response.get(\"payload\")\r\n if payload is not None:\r\n progress = 
payload.get(\"progress\")\r\n if progress is not None:\r\n result['JobProgress'] = str(progress)\r\n # check if the Search Job has finished.\r\n # if so, return a data dict back to Demisto\r\n # if No, Wait and loop in to run another status check,\r\n # until \"self.delta_time\" has elapsed\r\n payload_data = payload.get(\"data\")\r\n if payload_data is not None:\r\n if int(progress) == 100:\r\n progress_status = False\r\n for data_map_dict in payload_data:\r\n id_to_key_dict[data_map_dict['id']] = \\\r\n data_map_dict['name']\r\n\r\n for top_key in payload[\"top_keys\"]:\r\n keys.append(id_to_key_dict[top_key])\r\n\r\n # Calculate Total matching MBytes\r\n for top_value in payload[\"top_values\"]:\r\n matching_data = matching_data + int(top_value)\r\n values.append(str(top_value))\r\n\r\n result['TotalBytes'] = int(matching_data)\r\n\r\n for index in range(len(keys)):\r\n app_dict[keys[index]] = values[index] + ' Bytes'\r\n\r\n result['Status'] = str(payload['state'])\r\n result['DataSources'] = keys\r\n else:\r\n progress_status = False\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty payload data from {path}\"\r\n else:\r\n progress_status = False\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty meta data from {path}\"\r\n else:\r\n progress_status = False\r\n result['Status'] = rj.status_code\r\n result['Error'] = f\"ServerError - HTTP {rj.status_code} to /{path}\"\r\n # wait time before next run\r\n time.sleep(self.wait_time)\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - HTTP {rd.status_code} to /{path}\"\r\n\r\n if result['Status'] != 'complete':\r\n self.handle_error_notifications(result['Error'])\r\n return result", "def check_job_exists( job_list, analysis_group_id, reprocess_config_id):\n for job in job_list:\n struct = JSONMessage.unserialize(job.input_message)\n\n if( int( struct.analysis_group_id ) == int( analysis_group_id ) and \\\n int( struct.reprocess_config_id ) == int( reprocess_config_id ) ):\n return 1\n return 0", "def createCfg_analyze(self, jobOptions): \n lines = []\n ##lines.append(\"process.fwliteInput.fileNames = cms.vstring(%s)\" % [ os.path.basename(inputFile) for inputFile in inputFiles ])\n lines.append(\"process.fwliteInput.fileNames = cms.vstring(%s)\" % jobOptions['ntupleFiles'])\n lines.append(\"process.fwliteOutput.fileName = cms.string('%s')\" % os.path.basename(jobOptions['histogramFile']))\n lines.append(\"process.analyze_jetToTauFakeRate.process = cms.string('%s')\" % jobOptions['sample_category'])\n lines.append(\"process.analyze_jetToTauFakeRate.era = cms.string('%s')\" % self.era)\n lines.append(\"process.analyze_jetToTauFakeRate.triggers_1e = cms.vstring(%s)\" % self.triggers_1e)\n lines.append(\"process.analyze_jetToTauFakeRate.use_triggers_1e = cms.bool(%s)\" % (\"1e\" in jobOptions['triggers']))\n lines.append(\"process.analyze_jetToTauFakeRate.triggers_1mu = cms.vstring(%s)\" % self.triggers_1mu)\n lines.append(\"process.analyze_jetToTauFakeRate.use_triggers_1mu = cms.bool(%s)\" % (\"1mu\" in jobOptions['triggers']))\n lines.append(\"process.analyze_jetToTauFakeRate.triggers_1e1mu = cms.vstring(%s)\" % self.triggers_1e1mu)\n lines.append(\"process.analyze_jetToTauFakeRate.use_triggers_1e1mu = cms.bool(%s)\" % (\"1e1mu\" in jobOptions['triggers']))\n lines.append(\"process.analyze_jetToTauFakeRate.chargeSelection = cms.string('%s')\" % jobOptions['charge_selection'])\n lines.append(\"process.analyze_jetToTauFakeRate.jet_minPt = cms.double('%f')\" 
% jobOptions['jet_minPt'])\n lines.append(\"process.analyze_jetToTauFakeRate.jet_maxPt = cms.double('%f')\" % jobOptions['jet_maxPt'])\n lines.append(\"process.analyze_jetToTauFakeRate.jet_minAbsEta = cms.double('%f')\" % jobOptions['jet_minAbsEta'])\n lines.append(\"process.analyze_jetToTauFakeRate.jet_maxAbsEta = cms.double('%f')\" % jobOptions['jet_maxAbsEta'])\n lines.append(\"process.analyze_jetToTauFakeRate.hadTauSelection_denominator = cms.string('%s')\" % jobOptions['hadTau_selection_denominator'])\n lines.append(\"process.analyze_jetToTauFakeRate.hadTauSelections_numerator = cms.vstring(\")\n for hadTau_selection in jobOptions['hadTau_selections_numerator']:\n lines.append(\" '%s',\" % hadTau_selection)\n lines.append(\")\")\n lines.append(\"process.analyze_jetToTauFakeRate.absEtaBins = cms.vdouble(%s)\" % jobOptions['absEtaBins'])\n lines.append(\"process.analyze_jetToTauFakeRate.use_HIP_mitigation_mediumMuonId = cms.bool(%s)\" % jobOptions['use_HIP_mitigation_mediumMuonId'])\n lines.append(\"process.analyze_jetToTauFakeRate.isMC = cms.bool(%s)\" % jobOptions['is_mc'])\n lines.append(\"process.analyze_jetToTauFakeRate.central_or_shift = cms.string('%s')\" % jobOptions['central_or_shift'])\n lines.append(\"process.analyze_jetToTauFakeRate.lumiScale = cms.double(%f)\" % jobOptions['lumi_scale'])\n lines.append(\"process.analyze_jetToTauFakeRate.apply_genWeight = cms.bool(%s)\" % jobOptions['apply_genWeight'])\n lines.append(\"process.analyze_jetToTauFakeRate.apply_trigger_bits = cms.bool(%s)\" % jobOptions['apply_trigger_bits'])\n create_cfg(self.cfgFile_analyze, jobOptions['cfgFile_modified'], lines)", "def check_slurm_job_submission(expected_name):\n cmd = ['scontrol', 'show', 'job']\n job_id = 0\n found_job = False\n while True:\n while True:\n try:\n out = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0]\n break\n except:\n sleep(1)\n out = out.split('\\n')\n if 'error' in out[0]:\n sleep(1)\n msg = 'Error checking job status for {0}'.format(expected_name)\n logging.warning(msg)\n continue\n for line in out:\n for word in line.split():\n if 'JobId' in word:\n index = word.find('=') + 1\n job_id = int(word[index:])\n # continue\n if 'Name' in word:\n index = word.find('=') + 1\n if word[index:] == expected_name:\n found_job = True\n\n if found_job and job_id != 0:\n return found_job, job_id\n sleep(1)\n return found_job, job_id", "def ReturnCode(rc):\r\n return _hiew.ReturnCode(rc)", "def handle_upld_data(self, job):\n if job.data[\"high_reliability\"]:\n # Upload the file to all servers\n self.put_job_in_all_queues(job)\n\n list_job_results = self.get_internal_results_from_all_servers()\n\n if len(list_job_results) == 0:\n # We got no responses back, there are probably no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers responded\"))\n return\n\n # Check all the servers had success\n for result in list_job_results:\n if result.result[\"outcome\"] != \"success\":\n self.put_external_result(\n self.generate_failure_job(\"Unsuccessful, one of the servers did not have success\"))\n return\n\n # Tell the client we successfully uploaded to all servers\n response_result = copy.deepcopy(list_job_results[0])\n response_result.processed_by = None\n self.put_external_result(response_result)\n\n else:\n\n # Check we recognise the token\n if job.token not in self.UPLD_TOKEN_DICT:\n print(\"UNRECOGNISED TOKEN: {}\".format(job.token))\n return\n\n # Pass the job onto the server associated with the job token\n server_name = 
self.UPLD_TOKEN_DICT[job.token]\n self.put_job_in_specific_server_queue(job, server_name)\n\n # Get the result from the server and pass it back to the client\n result = self.get_internal_result_from_server(server_name,\n timeout=30\n if job.data[\"file_size\"] > 2 * 2 ** 20 else 4)\n self.put_external_result(result)", "def _parse_output(self, output):\n matched = False\n line_number = 0\n lines = output.strip().split(\"\\n\")\n for line in lines:\n line_number += 1\n line = line.rstrip()\n # import pdb; pdb.set_trace()\n if re.match(re.compile(r\"\\[SUCCESS\\]\"), line) and self.run_status != \"WARN\":\n self.run_status = \"SUCCESS\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n elif re.match(re.compile(r\"\\[FAILURE\\]\"), line):\n self.run_status = \"FAILURE\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n break\n elif re.match(re.compile(r\"\\[WARN\\]\"), line):\n self.run_status = \"WARN\"\n self.run_summary = line\n self.run_status_details = self._parse_output_status_details(lines[line_number:])\n matched = True\n if not matched:\n self.run_status = \"UNKNOWN\"\n self.run_summary = \"[UNKNOWN] log missing SUCCESS, FAILURE, or WARN message.\"\n\n return matched", "def report_status(self):\n if self.errno != -1:\n print fmt(\"[-] Xor: failure at index %d\" % self.errno, RED)\n else:\n print fmt(\"[+] Xor: success\", GREEN)", "def readJobs():\n\tfwpathsher,fwpathsunc = '/scratch/users/ksb/fireworks/jobs/','/nfs/slac/g/suncatfs/ksb/fireworks/jobs/'\n\texistingJobs = [str(x[0]) for x in dbase.sqlexecute('SELECT launchdir from job')]\n\tls = subprocess.Popen(['ssh','ksb@suncatls1.slac.stanford.edu', 'cd %s;ls'%fwpathsunc], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tsuncout, err \t= ls.communicate()\n\n\tsuncatJobs \t\t= [fwpathsunc + d for d in suncout.split('\\n') \t\tif fwpathsunc+d not in existingJobs and len(d)>1]\n\tsherlockJobs \t= [fwpathsher + x for x in os.listdir(fwpathsher) \tif fwpathsher+x not in existingJobs]\n\ttot = len(suncatJobs + sherlockJobs)\n\tfor i,d in enumerate(suncatJobs + sherlockJobs):\n\t\tprint d\n\t\tprint '%d/%d'%(i+1,tot) ; sys.stdout.write(\"\\033[F\") # Cursor up one line\n\t\tfwid = getFWID(d)\n\t\tdeleted = int(os.path.exists(d+'/deleted'))\n\t\tinputDict = misc.mergeDicts([{'fwid':fwid,'launchdir':d,'deleted':deleted},getInitData(fwid)])\n\n\t\tcommand = \"INSERT into job ({0}) values ({1})\".format(\t','.join(inputDict.keys())\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t,','.join(['?']*len(inputDict)))\n\t\ttry: dbase.sqlexecute(command,inputDict.values())\n\t\texcept: #remove 'bad keys'\n\t\t\tfor k in ['relax','vacancies_json']:\n\t\t\t\ttry: del inputDict[k]\n\t\t\t\texcept KeyError: pass\n\t\t\tcommand = \"INSERT into job ({0}) values ({1})\".format(\t','.join(inputDict.keys()),','.join(['?']*len(inputDict)))\n\t\t\tdbase.sqlexecute(command,inputDict.values())", "def examine_job(self):\n if self.data is None:\n print(\"Could not download job id\", self.job_id)\n return\n self.duration = self.read_data(\"duration\")\n self.between_commands = self.read_data(\"between_commands\")\n\n print(\"---\")\n print(\"test_id: {}\".format(self.job_id))\n print(\"duration:\")\n Job.print_results(self.duration)\n print(\"between_commands:\")\n Job.print_results(self.between_commands)\n print(\"\")", "def _handle_general_error(self, calculation):\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = 
True\n self.report('Calculation failed for a reason that can not be resolved automatically')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_SOMETHING_WENT_WRONG)", "def test_check_bundle_8(self):\n import_genome.check_bundle(self.bndl,\n ticket_ref=\"ticket\",\n file_ref=\"flat_file\",\n retrieve_ref=\"phagesdb\",\n retain_ref=\"mysql_x\")\n count = count_status(self.bndl, \"error\", \"warning\")\n with self.subTest():\n self.assertEqual(len(self.bndl.evaluations), 6)\n with self.subTest():\n self.assertEqual(count, 2)", "def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e", "def main(self):\n self.jamf_url = self.env.get(\"JSS_URL\")\n self.jamf_user = self.env.get(\"API_USERNAME\")\n self.jamf_password = self.env.get(\"API_PASSWORD\")\n self.ea_script_path = self.env.get(\"ea_script_path\")\n self.ea_name = self.env.get(\"ea_name\")\n self.replace = self.env.get(\"replace_ea\")\n self.ea_data_type = self.env.get(\"ea_data_type\")\n self.ea_inventory_display = self.env.get(\"ea_inventory_display\")\n self.sleep = self.env.get(\"sleep\")\n # handle setting replace in overrides\n if not self.replace or self.replace == \"False\":\n self.replace = False\n\n # clear any pre-existing summary result\n if \"jamfextensionattributeuploader_summary_result\" in self.env:\n del self.env[\"jamfextensionattributeuploader_summary_result\"]\n ea_uploaded = False\n\n # handle files with a relative path\n if not self.ea_script_path.startswith(\"/\"):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(f\"ERROR: EA file {self.ea_script_path} not found\")\n\n # now start the process of uploading the object\n self.output(f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n\n # obtain the relevant credentials\n token, send_creds, _ = self.handle_classic_auth(\n self.jamf_url, self.jamf_user, self.jamf_password\n )\n\n # check for existing - requires obj_name\n obj_type = \"extension_attribute\"\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(\n self.jamf_url,\n obj_name,\n obj_type,\n enc_creds=send_creds,\n token=token,\n )\n\n if obj_id:\n self.output(\n \"Extension Attribute '{}' already exists: ID {}\".format(\n self.ea_name, obj_id\n )\n )\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\".format(\n self.replace\n ),\n verbose_level=1,\n )\n else:\n self.output(\n \"Not replacing existing Extension Attribute. 
Use replace_ea='True' to enforce.\",\n verbose_level=1,\n )\n return\n\n # upload the EA\n self.upload_ea(\n self.jamf_url,\n self.ea_name,\n self.ea_data_type,\n self.ea_inventory_display,\n self.ea_script_path,\n obj_id=obj_id,\n enc_creds=send_creds,\n token=token,\n )\n ea_uploaded = True\n\n # output the summary\n self.env[\"extension_attribute\"] = self.ea_name\n self.env[\"ea_uploaded\"] = ea_uploaded\n if ea_uploaded:\n self.env[\"jamfextensionattributeuploader_summary_result\"] = {\n \"summary_text\": (\n \"The following extension attributes were created or \"\n \"updated in Jamf Pro:\"\n ),\n \"report_fields\": [\"name\", \"path\"],\n \"data\": {\"name\": self.ea_name, \"path\": self.ea_script_path},\n }", "def check_upload(job_id, file, mainchain = None):\n ## NOTE:\n ## - Requires uploaded structures to be X-ray EXPDTA\n ## - Checks if the PDB file contains valid aa/na residues\n ## - PDB file must have at least 30 ATOMs\n ## - PDB file can not have lowercase alt. res. numbers\n ## - Checks standard deviation of temp. factors\n ## - Checks that not all occupancies are 0.00\n ## - Checks for properly formatted ATOM lines\n tmpfile = None ## this is the second part of the return\n atom_num = []\n res_type = []\n res_num = []\n chain = []\n temp_factors = []\n bad_std = -1\n num_total = 0\n num_good = 0\n occupancy = 0.0\n ignore = 0\n line_num = 0\n\n for line in file:\n line_num += 1\n\n if line.startswith('HEADER'):\n header_id = re.sub(r\"^HEADER.{56}(....)\", '\\\\1', line).strip()\n ## FIXME: Calls to MySQL can not be made in this def, 2009-06-16\n #mysql.job_set_header_id(job_id, str(header_id))\n\n #if line.startswith('EXPDTA NMR') or \\\n # line.startswith('EXPDTA SOLUTION NMR'):\n # ## TODO: Might need to add \"SOLID-STATE NMR\", 2009-11-10\n # msg = \"NMR structure! \"\n # msg += \"Please do not submit NMR structures, theoretical models, \"\n # msg += \"or any PDB file with unrefined Bs.\"\n # return msg\n\n elif line.startswith('EXPDTA') and line.find('X-RAY DIFFRACTION') == -1:\n msg = \"Not an X-ray diffraction structure. TLSMD currently only \"\n msg += \"performs analysis on X-ray models. Will not proceed.\"\n return msg, tmpfile\n\n elif re.match(r'^REMARK 2 RESOLUTION\\. ([0-9\\.]{1,}) ANGSTROMS.*', line):\n resolution = re.sub(r'^REMARK 2 RESOLUTION\\. ([0-9\\.]{1,}) ANGSTROMS.*', '\\\\1', line).strip()\n ## FIXME: Calls to MySQL can not be made in this def, 2009-06-16\n #mysql.job_set_resolution(job_id, resolution)\n\n elif re.match('^ATOM.....................[0-9][a-z]', line):\n ## E.g., Don't allow \"100b\". Force it to be \"100B\"\n example = re.sub(r'^ATOM.....................([0-9][a-z]).*', '\\\\1', line).strip()\n msg = \"Please change lowercase to uppercase for alternate \"\n msg += \"residue numbers. 
(E.g., change \\\" %s \\\" to \\\" %s \\\")\" % (\n example, example.upper())\n return msg, tmpfile\n\n elif mainchain == True and line.startswith('ATOM') and \\\n const.RE_MAINCHAIN_ATOMS.match(line) and \\\n Library.library_is_standard_residue(line[17:20].strip()):\n ## Only pass mainchain atoms to the running_stddev() function\n tmpfile = misc.generate_security_code()\n num_total += 1\n\n try:\n int(line[7:11].strip())\n int(line[23:26].strip())\n float(line[56:60].strip())\n float(line[60:66].strip())\n except:\n return \"Not a proper ATOM line: <pre>%s</pre>\" % line, tmpfile\n\n if float(line[56:60].strip()) < 1.00:\n ## ignore occupancies < 1.00\n ignore += 1\n continue\n else:\n num_good += 1\n atom_num.append(int(line[7:11].strip()))\n res_type.append(line[17:20].strip())\n res_num.append(int(line[23:26].strip()))\n chain.append(line[21:22])\n occupancy += float(line[56:60].strip())\n temp_factors.append(float(line[60:66].strip()))\n\n elif mainchain == False and line.startswith('ATOM') and (\n Library.library_is_standard_residue(line[17:20].strip())):\n tmpfile = job_id\n num_total += 1\n\n try:\n int(line[7:11].strip())\n int(line[23:26].strip())\n float(line[56:60].strip())\n float(line[60:66].strip())\n except:\n return \"Not a proper ATOM line: <pre>%s</pre>\" % line, tmpfile\n\n if float(line[56:60].strip()) < 1.00:\n ## ignore occupancies < 1.00\n ignore += 1\n continue\n else:\n num_good += 1\n atom_num.append(int(line[7:11].strip()))\n res_type.append(line[17:20].strip())\n res_num.append(int(line[23:26].strip()))\n chain.append(line[21:22])\n occupancy += float(line[56:60].strip())\n temp_factors.append(float(line[60:66].strip()))\n\n else:\n continue\n\n #return \"Number of atoms: %s (%s) (%s)\" % (num_total, len(temp_factors), num_good)\n\n ## TODO: Add check for ANISOU that are pure ISOT, 2010-03-23\n\n ## FIXME: This does not work yet.\n #if(ignore == num_total):\n # return \"All occupancies are less than 1.0, so all atoms will be ignored. Nothing to do.\"\n\n msg = \"Not a PDB structure or has unrecognized residue names.\"\n if mainchain and num_good < 5:\n return msg, tmpfile\n elif not mainchain and num_good < 30:\n return msg, tmpfile\n\n if(occupancy / num_good == 0.0):\n return \"All occupancies are 0.0. 
TLSMD won't run on this structure.\", tmpfile\n\n bad_std, tmpfile = running_stddev(tmpfile, atom_num, res_type, res_num, \n chain, temp_factors)\n if bad_std > 0:\n ## If there are a string of \"bad\" B-factors, return a plot showing the\n ## \"bad\" regions and do not proceed any further in the analysis.\n f = open('%s/%s.gnu' % (conf.WEBTMP_PATH, tmpfile), 'w')\n\n ## modify script template\n script = _STDDEV_FOR_BAD_TFACT_TEMPLATE\n script = script.replace(\"<webtmp_path>\", conf.WEBTMP_PATH)\n script = script.replace(\"<tmpfile>\", tmpfile)\n script = script.replace(\"<gnuplot_font>\", conf.GNUPLOT_FONT)\n #script = script.replace(\"<min_stddev_bfact>\", conf.MIN_STDDEV_BFACT)\n #script = script.replace(\"<max_stddev_bfact>\", conf.MAX_STDDEV_BFACT)\n\n f.write(script)\n f.close()\n subprocess.Popen([r\"%s\" % conf.GNUPLOT, \"%s/%s.gnu\" % (\n conf.WEBTMP_PATH, tmpfile)]).wait()\n\n return_string = \"Standard deviation of temperature factors is less \"\n return_string += \"than %s or greater than %s for those residues in \" % (\n conf.MIN_STDDEV_BFACT, conf.MAX_STDDEV_BFACT)\n return_string += \"the shaded regions below:<br>\"\n return_string += \"<center><img src='%s/%s.png'/></center>\" % (\n conf.WEBTMP_URL, tmpfile)\n return_string += \"<br><h3>NOTE: Your structure was run through a \"\n return_string += \"sanity check twice: (1) using all atoms in your \"\n return_string += \"structure; and (2) using only the mainchain atoms \"\n return_string += \"({N,CA,C,O,CB} or {P,O5*,C5*,C4*,C3*,O3*}). \"\n return_string += \"Both sanity checks failed.</h3>\"\n return return_string, tmpfile\n\n return '', tmpfile" ]
[ "0.64432395", "0.5260613", "0.52343196", "0.52212924", "0.519622", "0.5182771", "0.51703864", "0.51282525", "0.5100127", "0.5092974", "0.50409675", "0.503647", "0.5012631", "0.50123405", "0.49673423", "0.49517173", "0.49453866", "0.49341914", "0.49084947", "0.49002942", "0.48976606", "0.48916396", "0.48898607", "0.488616", "0.48647535", "0.48443025", "0.48214337", "0.48166254", "0.48102736", "0.48077518", "0.48042068", "0.4800753", "0.4780471", "0.47770908", "0.47551388", "0.4752131", "0.47412097", "0.4730217", "0.47264573", "0.47239792", "0.4715868", "0.4712787", "0.47099406", "0.47088325", "0.47083408", "0.47078446", "0.47049347", "0.46937063", "0.46902043", "0.46744797", "0.46729973", "0.46720344", "0.46664676", "0.46606502", "0.46459085", "0.4643058", "0.4616007", "0.46012664", "0.45996192", "0.4597669", "0.45962146", "0.45950824", "0.45923215", "0.45913073", "0.45846334", "0.45783243", "0.45729795", "0.45707238", "0.4564079", "0.4560984", "0.45601577", "0.45414424", "0.4534023", "0.4533865", "0.4528831", "0.452762", "0.45219478", "0.45214498", "0.45198897", "0.45052463", "0.45045072", "0.4499542", "0.44988412", "0.44971976", "0.44938156", "0.44929495", "0.44890016", "0.44878832", "0.44863284", "0.4481366", "0.4475955", "0.4470366", "0.44686738", "0.44655815", "0.44590825", "0.44580078", "0.44567525", "0.44566646", "0.44534874", "0.44522905" ]
0.71490014
0
Move output of job already retrieved into the correct backup directory
def moveOutput(self,id, max_id,path,file):
        Dir_Base=path +'Submission_'
        for i in range(1, max_id):
            if not os.path.isdir( Dir_Base + str(i) + '/'):
                cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')
                cmd_out = runCommand(cmd)
                common.logger.debug(str(cmd_out))
        cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null'
        try:
            cmd_out = runCommand(cmd)
            common.logger.debug(cmd_out)
        except:
            msg = 'no output to move for job '+str(id)
            common.logger.debug(msg)
            pass
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transfer_job_to_single_archive(wcl, saveinfo, dest, task_label):\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"TRANSFER JOB TO ARCHIVE SECTION\")\n archive_info = wcl['%s_archive_info' % dest.lower()]\n tstats = None\n if 'transfer_stats' in wcl:\n tstats = pfwutils.pfw_dynam_load_class(wcl, 'stats_' + task_label, wcl['transfer_stats'], None)\n\n # dynamically load class for job_file_mvmt\n if 'job_file_mvmt' not in wcl:\n msg = \"Error: Missing job_file_mvmt in job wcl\"\n raise KeyError(msg)\n\n jobfilemvmt = None\n jobfilemvmt = dynam_load_jobfilemvmt(wcl, tstats)\n\n # tranfer files to archive\n if dest.lower() == 'target':\n res = jobfilemvmt.job2target(saveinfo)\n else:\n res = jobfilemvmt.job2home(saveinfo, wcl['verify_files'])\n\n arc = \"\"\n if 'home_archive' in wcl and 'archive' in wcl:\n ha = wcl['home_archive']\n if ha in wcl['archive'] and 'root_http' in wcl['archive'][ha]:\n arc = ' (' + wcl['archive'][wcl['home_archive']]['root_http'] + ')'\n\n # register files that we just copied into archive\n problemfiles = {}\n for fkey, finfo in res.items():\n if 'err' in finfo:\n problemfiles[fkey] = finfo\n msg = \"Warning: Error trying to copy file %s to %s archive%s: %s\" % \\\n (fkey, dest, arc, finfo['err'])\n print msg\n\n if problemfiles:\n print \"ERROR\\n\\n\\nError: putting %d files into archive %s\" % \\\n (len(problemfiles), archive_info['name'])\n print \"\\t\", problemfiles.keys()\n raise Exception(\"Error: problems putting %d files into archive %s\" %\n (len(problemfiles), archive_info['name']))", "def backup_output_path(self):\n backup_path = TaskOps().backup_base_path\n if backup_path is None:\n return\n FileOps.copy_folder(TaskOps().local_output_path, backup_path)", "def cleanup(job, tempOutputFileStoreID, outputFile, cores=1, memory=sortMemory, disk=\"3G\"):\n fileName = job.fileStore.readGlobalFile(tempOutputFileStoreID)\n shutil.copyfile(fileName, outputFile)\n job.fileStore.logToMaster(\"Finished copying sorted file to output: %s\" % outputFile)", "def backup_command(server, output):\n # Stop saving chunks\n server.save_off()\n # Run the external save program\n subprocess.call(CONFIG['backup_command']['script'].split())\n # Start saving chunks again\n server.save_on()\n return", "def move_from_temp_directory(self):", "def backup_test_results(manager, targetname=\"extractor_test_results/\"):\n \n print \"Zalohuji vysledky...\"\n \n t = time.time()\n tstamp = str(dt.datetime.fromtimestamp(t))\n tstamp = re.sub(r'\\s', '__', tstamp)\n tstamp = re.sub(r'[\\:\\.]', '-', tstamp)\n\n destination = targetname + tstamp + \"/\"\n \n fnames = {\"data_positive_filenames\": positives,\n \"data_negative_filenames\": negatives,\n \"data_HNM_filenames\": hnms,\n \"draw_positive_filenames\": positives_to_draw,\n \"draw_negative_filenames\": negatives_to_draw,\n \"draw_HNM_filenames\": hnms_to_draw}\n \n fm.copytree(targetname+\"HoG\", destination)\n dr.zapis_json(fnames, destination+\"test_filenames.json\")\n fm.copyfile(\"test_HoGs.py\", destination+\"/test_HoGs.py\")\n \n# if to_train:\n# fm.copyfile(\"classification/evaluation/train_evaluation.json\", \n# destination+\"/evaluation_train.json\")\n \n print \"Hotovo\"", "def process_backup_mode(self):\n if self.backup_mode:\n previous_backup_path = self.original_destination_path / 'current'\n self.current_destination_path /= datetime.today().isoformat()\n if previous_backup_path.is_dir():\n self.rsync_command.append('--link-dest=' + str(previous_backup_path))\n 
logging.debug('Destination directory for backup: %s', self.current_destination_path)", "def copy_output_to_archive(wcl, jobfiles, fileinfo, level, task_label, exitcode):\n # fileinfo[filename] = {filename, fullname, sectname}\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n putinfo = {}\n\n\n # check each output file definition to see if should save file\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"Checking for save_file_archive\")\n\n for (filename, fdict) in fileinfo.items():\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"filename %s, fdict=%s\" % (filename, fdict))\n (filename, compression) = miscutils.parse_fullname(fdict['fullname'],\n miscutils.CU_PARSE_FILENAME|miscutils.CU_PARSE_COMPRESSION)\n\n putinfo[filename] = {'src': fdict['fullname'],\n 'compression': compression,\n 'filename': filename,\n 'filetype': fdict['filetype'],\n 'filesave': fdict['filesave'],\n 'filecompress': fdict['filecompress'],\n 'path': fdict['path']}\n\n transfer_job_to_archives(wcl, jobfiles, putinfo, level, task_label, exitcode)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")", "def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def export(self, out=None):\n if out is None:\n host = api.registered_host()\n workfile = host.current_file() or \"temp\"\n out = os.path.abspath(workfile + \".sftp.job\")\n\n with open(out, \"w\") as file:\n json.dump(self.jobs, file, indent=4)\n\n return out", "def _store_test_result(ptfhost):\n logger.info(\"Copying file from folder: {0} to folder: {1}\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))\n ptfhost.shell(\"cp {0}/*.* {1}/\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. 
Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def seafile_backup():\n global jobIds\n\n updateRcloneJobStatus()\n\n if any(jobId != None for _, jobId in jobIds.items()):\n abort(423, \"A job is already running\")\n\n # First, backup the databases (per Seafile documentation)\n # https://manual.seafile.com/maintain/backup_recovery/#backup-order-database-first-or-data-directory-first\n # requests.post(\"http://seafile-db:34770/seafile-backup\", headers={\n # Authorization: f'Bearer {authSecret}'\n # })\n\n # Second, queue all the rclone jobs\n fprint(\"Queue'ing up rclone jobs\")\n jobsData = {\n # Backup all the seafile files to remote (backblze B2)\n \"remote\": {\n \"srcFs\": 'battoseafile:',\n \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n },\n # Backup all the seafile files to local backup\n \"local\": {\n \"srcFs\": 'battoseafile:',\n \"dstFs\": '/backup-local-dest/files',\n },\n # TODO: Readd the backups for the db and db data\n # Probably need their own bucket\n # Backup all the seafile db and config files to remote (backblaze B2)\n # \"dbRemote\": {\n # \"srcFs\": 'battoseafile:',\n # \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n # },\n # # Backup all the seafile db and config files to local\n # \"dbLocal\": {\n # \"srcFs\": 'battoseafile:',\n # \"dstFs\": 'battob2:b4tto-seafile-backup-2',\n # }\n # ... 
and the data ones\n }\n for jobName, jobData in jobsData.items():\n fprint(f\"Queue'ing up rclone job '{jobName}'\")\n resp = requests.post(\"http://test:test@seafile-backups:5572/sync/sync\", data={\n **jobsData,\n \"_async\": True\n }, headers={\n 'Authorization': 'Basic dGVzdDp0ZXN0'\n })\n json = resp.json()\n fprint(json)\n jobIds[jobName] = json[\"jobid\"]\n fprint(f\"Rclone job '{jobName}' got id '{jobIds[jobName]}'\")\n\n return \"success\"", "def backup(self, outdir=None):\n import os\n if outdir is None:\n import time\n outdir = os.path.join('backup',time.strftime('%Y%m%d-%H%M'))\n cmd = 'time mongodump -c \"%s\" -h %s:%s -d mfdb -o \"%s\"'%(\n self.collection.name, self.db.host, self.db.port, outdir)\n print cmd\n os.system(cmd)", "def move_tbimported_in_finished(self, data):\r\n conf = self.func.config_info()\r\n folder_name = self.bid_folder_name() \r\n\r\n if \"ProofreaderStatus\" in list(data.keys()):\r\n if data[\"ProofreaderStatus\"] == \"FINISHED\":\r\n files = os.listdir(conf[\"path_to_batches_tbimported\"])\r\n if folder_name in files:\r\n src = os.path.join(conf[\"path_to_batches_tbimported\"], folder_name)\r\n dst = os.path.join(conf[\"path_to_batches_finished\"], folder_name)\r\n self.func.move_folder(src, dst)\r\n\r\n if not self.func.folder_exists(dst):\r\n raise Exception(\"Folder {} not moved in '6 FINISHED'!\".format(folder_name))\r\n else:\r\n raise Exception(\"Folder {} not found in '5 TO BE IMPORTED'!\".format(folder_name))", "def output_transfer_prep(wcl, jobfiles, putinfo, task_label, exitcode):\n\n mastersave = wcl.get(pfwdefs.MASTER_SAVE_FILE).lower()\n mastercompress = wcl.get(pfwdefs.MASTER_COMPRESSION)\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"%s: mastersave = %s\" % (task_label, mastersave))\n miscutils.fwdebug_print(\"%s: mastercompress = %s\" % (task_label, mastercompress))\n\n # make archive rel paths for transfer\n saveinfo = {}\n for key, fdict in putinfo.items():\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"putinfo[%s] = %s\" % (key, fdict))\n should_save = pfwutils.should_save_file(mastersave, fdict['filesave'], exitcode)\n if should_save:\n if 'path' not in fdict:\n miscutils.fwdebug_print(\"Error: Missing path (archivepath) in file definition\")\n print key, fdict\n sys.exit(1)\n should_compress = pfwutils.should_compress_file(mastercompress,\n fdict['filecompress'],\n exitcode)\n fdict['filecompress'] = should_compress\n fdict['dst'] = \"%s/%s\" % (fdict['path'], os.path.basename(fdict['src']))\n saveinfo[key] = fdict\n\n call_compress_files(wcl, jobfiles, saveinfo)\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"After compress saveinfo = %s\" % (saveinfo))\n\n return saveinfo", "def _backup(self, parsed_args):\n if self.backup:\n dep_sys = self.document['deploymentSystem']\n dep_path = self.document['deploymentPath']\n backup_dep_path = dep_path + '.' 
+ str(seconds())\n\n print_stderr('Backing up agave://{}/{}'.format(dep_sys, dep_path))\n start_time = milliseconds()\n self.messages.append(\n ('backup', 'src: agave://{}/{}'.format(dep_sys, dep_path)))\n self.messages.append(\n ('backup', 'dst: agave://{}/{}'.format(dep_sys,\n backup_dep_path)))\n\n try:\n # TODO - only do this if dep_path exists, otherwise an Exception will be raised\n manage.move(dep_path,\n system_id=dep_sys,\n destination=backup_dep_path,\n agave=self.tapis_client)\n print_stderr('Finished ({} msec)'.format(milliseconds() -\n start_time))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('backup', str(exc)))\n print_stderr('Failed ({} msec)'.format(milliseconds() -\n start_time))\n return False\n else:\n raise\n\n return True", "def terminated(self):\n # Cleanup tmp file\n try:\n os.remove(self.tmp_filename)\n except Exception, ex:\n gc3libs.log.error(\"Failed removing temporary file %s. \" % self.tmp_filename +\n \"Error type %s. Message %s\" % (type(ex), str(ex)))\n\n if not self.local_output_file:\n # outputs = gc3libs.ANY_OUTPUT\n for path in os.path.listdir(self.output_dir):\n if os.path.isfile(path) and path.startswith('pos'):\n # We assume this is the output file to retrieve\n self.local_output_file = path\n self.local_result_output_file = os.path.join(self.result_dir,path)\n\n # copy output file `pos*` in result_dir\n if not os.path.isfile(self.local_output_file):\n gc3libs.log.error(\"Output file %s not found\" \n % self.local_output_file)\n self.execution.returncode = (0, 100)\n else:\n try:\n shutil.move(self.local_output_file, \n self.local_result_output_file)\n except Exception, ex:\n gc3libs.log.error(\"Failed while transferring output file \" +\n \"%s \" % self.local_output_file +\n \"to result folder %s. \" % self.result_dir +\n \"Error type %s. Message %s. 
\" \n % (type(ex),str(ex)))\n \n self.execution.returncode = (0, 100)", "def run(self):\n\n os.chdir(LOCAL_DIR)\n print('Writing to local directory..')\n W_SEM.acquire()\n if not self.jobqueue:\n print('No files in jobqueue.')\n S_SEM.release()\n else:\n while self.jobqueue:\n job_tuple = self.jobqueue.pop(0)\n job, file = job_tuple\n jobcode = job[0]\n if jobcode[:4] == 'SYNC':\n continue\n if jobcode == 'CP':\n src, dest = job[1], job[2]\n with open(dest, 'wb') as f:\n f.write(file)\n f.close()\n if jobcode == 'CPDIR':\n src, dest = job[1], job[2]\n try:\n os.mkdir(dest)\n except FileExistsError:\n print(dest,' already exists.')\n elif jobcode == 'RM':\n dest = job[1]\n os.remove(dest)\n elif jobcode == 'RMDIR':\n dest = job[1]\n shutil.rmtree(dest)\n if jobcode == 'DONE':\n print('Done writing to local directory!')\n S_SEM.release()\n break", "def transfer_single_archive_to_job(wcl, files2get, jobfiles, dest):\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n\n archive_info = wcl['%s_archive_info' % dest.lower()]\n\n res = None\n transinfo = get_file_archive_info(wcl, files2get, jobfiles,\n archive_info,)\n\n if len(transinfo) != len(files2get):\n badfiles = []\n for file_name in files2get:\n if file_name not in transinfo.keys():\n badfiles.append(file_name)\n raise Exception(\"Error: the following files did not have entries in the database:\\n%s\" % (\", \".join(badfiles)))\n if transinfo:\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"\\tCalling target2job on %s files\" % len(transinfo))\n starttime = time.time()\n tasktype = '%s2job' % dest\n tstats = None\n if 'transfer_stats' in wcl:\n tstats = pfwutils.pfw_dynam_load_class(wcl, 'stats_' + tasktype, wcl['transfer_stats'], None)\n\n jobfilemvmt = dynam_load_jobfilemvmt(wcl, tstats)\n\n if dest.lower() == 'target':\n res = jobfilemvmt.target2job(transinfo)\n else:\n res = jobfilemvmt.home2job(transinfo)\n\n print \"DESDMTIME: %s2job %0.3f\" % (dest.lower(), time.time()-starttime)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")\n\n return res", "def backup(self):\n if self.url is not None:\n\n # zip backup folder\n zipapp.create_archive(self.logs_directory, self.send_zip)\n\n # then send zipped folder to the URL\n try:\n requests.post(self.url, files={\n 'uploaded_file': (os.path.basename(self.send_zip), open(self.send_zip, 'rb')),\n })\n except requests.exceptions.ConnectionError as error:\n print(error)", "def transfer_job_to_archives(wcl, jobfiles, putinfo, level, task_label, exitcode):\n # level: current calling point: wrapper or job\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG %s %s\" % (level, task_label))\n miscutils.fwdebug_print(\"len(putinfo) = %d\" % len(putinfo))\n miscutils.fwdebug_print(\"putinfo = %s\" % putinfo)\n\n level = level.lower()\n job2target = 'never'\n if pfwdefs.USE_TARGET_ARCHIVE_OUTPUT in wcl:\n job2target = wcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT].lower()\n job2home = 'never'\n if pfwdefs.USE_HOME_ARCHIVE_OUTPUT in wcl:\n job2home = wcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT].lower()\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"job2target = %s\" % job2target)\n miscutils.fwdebug_print(\"job2home = %s\" % job2home)\n\n if putinfo:\n saveinfo = None\n if level == job2target or level == job2home:\n saveinfo = output_transfer_prep(wcl, jobfiles, putinfo,\n task_label, exitcode)\n\n if level == job2target:\n 
transfer_job_to_single_archive(wcl, saveinfo, 'target',\n task_label)\n\n if level == job2home:\n transfer_job_to_single_archive(wcl, saveinfo, 'home',\n task_label)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")", "def backup_parrec():\n \n backup_dir = os.getcwd() + '/rawdata_backup'\n\n if not os.path.exists(backup_dir):\n os.mkdir(backup_dir)\n\n PAR_files = glob.glob('*.PAR')\n REC_files = glob.glob('*.REC')\n\n to_move = zip(PAR_files, REC_files)\n \n for PAR,REC in to_move:\n shutil.move(PAR, backup_dir)\n shutil.move(REC, backup_dir)\n \n print \"Back-up completed for %i files\" % (len(to_move))", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def transfer_archives_to_job(wcl, neededfiles):\n # transfer files from target/home archives to job scratch dir\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n if miscutils.fwdebug_check(6, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"neededfiles = %s\" % neededfiles)\n\n files2get = neededfiles.keys()\n\n arc = \"\"\n if 'home_archive' in wcl and 'archive' in wcl:\n ha = wcl['home_archive']\n if ha in wcl['archive'] and 'root_http' in wcl['archive'][ha]:\n arc = ' (' + wcl['archive'][wcl['home_archive']]['root_http'] + ')'\n\n if files2get and wcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT].lower() != 'never':\n res = transfer_single_archive_to_job(wcl, files2get, neededfiles,\n 'target')\n\n if res is not None and res:\n problemfiles = {}\n for fkey, finfo in res.items():\n if 'err' in finfo:\n problemfiles[fkey] = finfo\n msg = \"Warning: Error trying to get file %s from target archive%s: %s\" % \\\n (fkey, arc, finfo['err'])\n print msg\n\n files2get = list(set(files2get) - set(res.keys()))\n if problemfiles:\n print \"Warning: had problems getting input files from target archive%s\" % arc\n print \"\\t\", problemfiles.keys()\n files2get += problemfiles.keys()\n else:\n print \"Warning: had problems getting input files from target archive%s.\" % arc\n print \"\\ttransfer function returned no results\"\n\n\n # home archive\n if files2get and pfwdefs.USE_HOME_ARCHIVE_INPUT in wcl and \\\n wcl[pfwdefs.USE_HOME_ARCHIVE_INPUT].lower() == 'wrapper':\n res = transfer_single_archive_to_job(wcl, files2get, neededfiles,\n 'home')\n\n if res is not None and res:\n problemfiles = {}\n for fkey, finfo in res.items():\n if 'err' in finfo:\n problemfiles[fkey] = finfo\n msg = \"Warning: Error trying to get file %s from home archive%s: %s\" % \\\n (fkey, arc, finfo['err'])\n print msg\n\n files2get = list(set(files2get) - set(res.keys()))\n if problemfiles:\n print \"Warning: had problems getting input files from home archive%s\" % arc\n print \"\\t\", problemfiles.keys()\n files2get += problemfiles.keys()\n else:\n print \"Warning: had problems getting input files from home archive%s.\" % arc\n print \"\\ttransfer function returned no results\"\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")\n return files2get", "def destage_output(output_dict):\n for _, desc in output_dict.items():\n if isinstance(desc, dict):\n if desc['class'] == 'File':\n location = urlparse(desc['location'])\n dest_path = os.path.join(os.getcwd(), os.path.basename(location.path))\n 
shutil.move(location.path, dest_path)\n desc['location'] = 'file://' + dest_path\n\n return output_dict", "def save_trans_end_of_job(wcl, jobfiles, putinfo):\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"BEG\")\n miscutils.fwdebug_print(\"len(putinfo) = %d\" % len(putinfo))\n\n job2target = 'never'\n if pfwdefs.USE_TARGET_ARCHIVE_OUTPUT in wcl:\n job2target = wcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT].lower()\n job2home = 'never'\n if pfwdefs.USE_HOME_ARCHIVE_OUTPUT in wcl:\n job2home = wcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT].lower()\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"job2target = %s\" % job2target)\n miscutils.fwdebug_print(\"job2home = %s\" % job2home)\n\n if putinfo:\n # if not end of job and transferring at end of job, save file info for later\n if job2target == 'job' or job2home == 'job':\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"Adding %s files to save later\" % len(putinfo))\n jobfiles['output_putinfo'].update(putinfo)\n\n if miscutils.fwdebug_check(3, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"END\\n\\n\")", "def backup_directory(self, source_directory, destination_directory):\n pass", "def move_to_dest(self):\n destpath = self._move_to_dest(self.outfile, self.tempdir, self.destdir)\n\n if destpath:\n self.all_files.append(destpath)\n download_logger.info('Completed {}'.format(destpath))\n\n with self._lock:\n self.outfile = \"\"", "def _process_task_output(self):\n # pylint: disable=too-many-branches\n directory = self._executor.out_dir\n if not os.path.exists(directory):\n return\n try:\n for root, _dirs, files in os.walk(directory):\n for name in files:\n filepath = os.path.join(root, name)\n # the name should match what is in the db!\n\n if name == 'output.json':\n log.debug(\"POSTRO FOUND output.json\")\n # parse and compare/update with the tasks output ports from db\n output_ports = dict()\n with open(filepath) as f:\n output_ports = json.load(f)\n task_outputs = self._task.output\n for to in task_outputs:\n if to['key'] in output_ports.keys():\n to['value'] = output_ports[to['key']]\n log.debug(\"POSTRPO to['value]' becomes %s\", output_ports[to['key']])\n flag_modified(self._task, \"output\")\n _session = self._db.Session()\n try:\n _session.commit()\n except exc.SQLAlchemyError as e:\n log.debug(e)\n _session.rollback()\n finally:\n _session.close()\n else:\n # we want to keep the folder structure\n if not root == directory:\n rel_name = os.path.relpath(root, directory)\n name = rel_name + \"/\" + name\n\n object_name = str(self._task.project_id) + \"/\" + self._task.node_id + \"/\" + name\n success = False\n ntry = 3\n trial = 0\n while not success and trial < ntry:\n log.debug(\"POSTRO pushes to S3 %s try %s from %s\", object_name, trial, ntry)\n success = self._s3.client.upload_file(self._s3.bucket, object_name, filepath)\n trial = trial + 1\n\n except (OSError, IOError) as _e:\n logging.exception(\"Could not process output\")", "def restore(ctx, destination, filesystem, backup_time):\n config_path = ctx.obj['config_path']\n\n config = Config(config_path)\n job = config.jobs.get(filesystem)\n\n if job is None:\n print('Filesystem does not exist.')\n sys.exit(1)\n\n job.restore(backup_time, destination)\n\n print('Restore successful.')", "def output():\n\n print(\"\\n*****************************************************************\")\n print(\"\\nAll transfer data is saved in 'All_transfer_frequencies.csv'\")\n print(\"\\nThe most likely 
transfers are saved in 'likely_transfers.csv'\")\n\n os.mkdir(\"Transfer_results\")\n os.system(\"mv *.csv Transfer_results\")\n\n print(\"\\nBoth results are saved in the 'Transfer_results' directory\")\n print(\"\\nScript finished running\")\n print(\"\\n*****************************************************************\")", "def move_home_pypackage_back():\n\n backups = defaultdict(list)\n home = os.path.expanduser(\"~\")\n for file_name in os.listdir(home):\n if \".pypackage\" in file_name and file_name.endswith(\"~\"):\n file_path = os.path.join(home, file_name)\n backups[os.stat(file_path).st_ctime].append(file_path)\n\n shutil.move(\n max(backups[max(backups)]), # the longest of the lastest created\n os.path.join(home, \".pypackage\"),\n )", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass", "def __makeBackup(self):\n pass #FIXME!!!", "def backup(self):\n\n if not File.backup_text(self.get_title()): return\n if Settings.get_destination() == \"remote\":\n Remote.upload_file(self)\n elif Settings.get_destination() == \"google\":\n Google.upload_file(file=self)\n else:\n # move file to local backup location\n backupPath = os.path.join(Settings.get_local_path(), \"posted\")\n backupPath = os.path.join(backupPath, self.category, self.get_title())\n shutil.move(self.get_path(), backupPath)", "def qmove(queue, job_id):\n ssh = connect_server()\n if isinstance(job_id, JobStatus):\n i,o,e = ssh.exec_command(qmove_c + queue + ' ' + job_id.id)\n else:\n i,o,e = ssh.exec_command(qmove_c + queue + ' ' + job_id)\n\n qmove_output = o.readlines() + e.readlines()\n ssh.close()", "def _get_job_commands(self, fasta_fps, output_dir, params, job_prefix,\r\n working_dir, command_prefix=None,\r\n command_suffix='; exit'):\r\n # Create basenames for each of the output files. These will be filled\r\n # in to create the full list of files created by all of the runs.\r\n out_filenames = [job_prefix + '.%d_blast_out.txt']\r\n\r\n command_prefix = command_prefix or \\\r\n '/bin/bash; export BLASTMAT=%s;' % params['blastmat_dir']\r\n\r\n if not params['disable_low_complexity_filter']:\r\n complexity_filter_str = 'T'\r\n else:\r\n complexity_filter_str = 'F'\r\n\r\n # Create lists to store the results.\r\n commands = []\r\n result_filepaths = []\r\n\r\n # Iterate over the input files.\r\n for i, fasta_fp in enumerate(fasta_fps):\r\n # Each run ends with moving the output file from the tmp dir to\r\n # the output_dir. 
Build the command to perform the move here.\r\n # rename_command, current_result_filepaths = \\\r\n # self._get_rename_command([fn % i for fn in out_filenames],\r\n # working_dir, output_dir)\r\n #result_filepaths += current_result_filepaths\r\n\r\n # TODO should this be put in self._get_rename_command()?\r\n infile_basename = splitext(split(fasta_fp)[1])[0]\r\n working_outfile_path = '%s/%s_blast_out.txt' %\\\r\n (working_dir, infile_basename)\r\n outfile_path = '%s/%s_blast_out.txt' % (output_dir,\r\n infile_basename)\r\n rename_command = '; mv %s %s' % (working_outfile_path,\r\n outfile_path)\r\n result_filepaths.append(outfile_path)\r\n\r\n command = '%s %s -p blastn -m 9 -e %s -F %s -W %s -b %s -i %s -d %s > %s %s %s' % \\\r\n (command_prefix,\r\n self._script_name,\r\n params['e_value'],\r\n complexity_filter_str,\r\n params['word_size'],\r\n params['num_hits'],\r\n fasta_fp,\r\n params['blast_db'],\r\n working_outfile_path,\r\n rename_command,\r\n command_suffix)\r\n commands.append(command)\r\n return commands, result_filepaths", "def backup_csv():\n for file_name in os.listdir():\n if \".csv\" in file_name:\n print(\"There shouldn't be any .csv files in your directory. We found .csv files in your directory.\")\n directory = os.getcwd()\n try:\n os.mkdir(directory + \"/backup/\")\n except:\n print(\"Backup folder exists.\")\n timestamp = datetime.now()\n shutil.move(file_name, directory + \"/backup/\" + str(timestamp) + \"-\" + file_name)", "def submit(self, src, dst):\n if dst.exists():\n # Tell the manager that the src was already move to the dst, but\n # the kwcoco files may need to be updated.\n assert not src.exists()\n self._previous_moves.append({'src': src, 'dst': dst})\n else:\n assert src.exists()\n self.jobs.append({'src': src, 'dst': dst})", "def store(self, job, result):\n pass", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def _finish(self):\n self._stop_signal = True\n self._dispose()\n\n output_path = self.get_output_path()\n if self.process_bsr and self.finished_ok:\n logger.info('Collecting best sellers data...')\n temp_file = output_path + 'temp_file.jl'\n cmd = '%s/product-ranking/add-best-seller.py %s %s > %s' % (\n REPO_BASE_PATH, output_path+'.jl',\n output_path+'_bs.jl', temp_file)\n try: # if best seller failed, download data without bsr column\n output = check_output(cmd, shell=True, stderr=STDOUT)\n logger.info('BSR script output: %s', output)\n with open(temp_file) as bs_file:\n lines = bs_file.readlines()\n with open(output_path+'.jl', 'w') as main_file:\n main_file.writelines(lines)\n os.remove(temp_file)\n except CalledProcessError as ex:\n logger.error('Best seller conversion error')\n logger.error(ex.output)\n logger.exception(ex)\n try:\n data_key = put_file_into_s3(\n AMAZON_BUCKET_NAME, output_path+'.jl')\n except Exception as ex:\n logger.error('Data file uploading error')\n logger.exception(ex)\n data_key = None\n logs_key = put_file_into_s3(\n AMAZON_BUCKET_NAME, output_path+'.log')\n\n if self.is_screenshot_job():\n jl_results_path = output_path + '.screenshot.jl'\n url2screenshot_log_path = output_path+'.screenshot.log'\n screenshot_finished = self._wait_for_screenshot_job_to_finish(output_path=output_path)\n if not screenshot_finished:\n logger.info('Screenshot job isnt finished, nothing to upload')\n else:\n try:\n put_file_into_s3(\n AMAZON_BUCKET_NAME, jl_results_path,\n is_add_file_time=True)\n except Exception as ex:\n logger.error('Screenshot file uploading 
error')\n logger.exception(ex)\n try:\n put_file_into_s3(\n AMAZON_BUCKET_NAME, url2screenshot_log_path,\n is_add_file_time=True)\n except Exception as ex:\n logger.error('url2screenshot log file uploading error')\n logger.exception(ex)\n\n csv_data_key = None\n global CONVERT_TO_CSV\n if CONVERT_TO_CSV:\n try:\n csv_filepath = convert_json_to_csv(output_path, logger)\n logger.info('JSON converted to CSV file created at: %r.', csv_filepath)\n csv_data_key = put_file_into_s3(\n AMAZON_BUCKET_NAME, csv_filepath)\n except Exception as e:\n logger.warning(\n \"CSV converter failed with exception: %s\", str(e))\n\n if data_key and logs_key:\n dump_result_data_into_sqs(\n data_key, logs_key, csv_data_key,\n self.task_data['server_name']+OUTPUT_QUEUE_NAME, self.task_data)\n else:\n logger.error(\"Failed to load info to results sqs. Amazon keys \"\n \"wasn't received. data_key=%r, logs_key=%r.\",\n data_key, logs_key)\n # TODO Fix spider stderr output\n logger.info(\"Spider default output:\\n%s\",\n self.process.stdout.read().strip())\n logger.info('Finish task #%s.', self.task_data.get('task_id', 0))\n\n self.finished = True\n self.finish_date = datetime.datetime.utcnow()\n self.task_data['finish_time'] = \\\n time.mktime(self.finish_date.timetuple())", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def performFullBackupOfEntry(self, backupEntry):\n time = datetime.datetime.now()\n # timeString = self.datetimeToString(time)\n timeString = self.organizer.datetimeToString(time)\n\n name = backupEntry.getName()\n compression = backupEntry.getCompressionType()\n fileExtension = backupEntry.getFilenameExtension()\n directory = backupEntry.getDirectory()\n directoryName = directory.strip('/')\n if directoryName.find('/') == -1:\n directoryName = '/'\n else:\n while True:\n ind = directoryName.find('/')\n if ind == -1:\n break\n directoryName = directoryName[ind + 1 :]\n\n snarFilename = name + '_' + timeString + '.snar'\n tarFilename = name + '_' + timeString + '_' + self.fullBackupFilenameExtension + fileExtension\n\n snarFullFilename = os.path.join(self.backupDirectory, snarFilename)\n tarFullFilename = os.path.join(self.backupDirectory, tarFilename)\n\n tarDict = {\n 'tar': '',\n 'gz': 'z',\n 'bz2': 'j',\n 'xz': 'J'\n }\n\n command = 'tar'\n commandOptions = ' --listed-increment ' + snarFullFilename + ' -c' + tarDict[compression] + 'pf ' + tarFullFilename + ' ' + directoryName\n commandString = command + commandOptions\n\n logging.info('Starting full backup of ' + directory + '.')\n\n try:\n currentDir = os.getcwd()\n os.chdir(directory)\n os.chdir('..')\n process = subprocess.Popen(shlex.split(commandString), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n if stderr:\n logging.error('Executing tar resulted in an error.')\n logging.error(stderr)\n os.chdir(currentDir)\n 
logging.info('Successfully created full backup of ' + directory + ' and stored in ' + tarFullFilename + \\\n '. The corresponding snapshot was stored in ' + snarFullFilename + '.')\n\n except OSError as exception:\n logging.error('Exception occured: ' + str(exception))\n os.chdir(currentDir)\n except Exception as exception:\n logging.error('An unknown exception occured: ' + str(exception))\n os.chdir(currentDir)", "def _save_results(self, src, dst):\n\t\tlog = self.log\n\n\t\tif not os.path.isfile(src):\n\t\t\tlog.error(\"The folder \\\"%s\\\" doesn't exist.\" % src)\n\t\t\treturn False\n\n\t\tif not os.path.exists(dst):\n\t\t\ttry:\n\t\t\t\tos.makedirs(dst)\n\t\t\texcept (IOError, os.error), why:\n\t\t\t\tlog.error(\"Unable to create directory \\\"%s\\\": %s\" % (dst, why))\n\t\t\t\treturn False\n\t\telse:\n\t\t\tlog.error(\"The folder \\\"%s\\\" already exists. It should be used \" \\\n\t\t\t\t\t \"for storing results of task with ID %s. \" \\\n\t\t\t\t\t \"Have you deleted Cuckoo's database?\"\n\t\t\t\t\t % (dst, self.task[\"id\"]))\n\t\t\treturn False\n\t\ttry:\n\t\t\ttar = tarfile.open(src, \"r:gz\")\n\t\t\ttar.extractall(path = dst)\n\t\t\ttotal = len(tar.getmembers())\n\t\t\tlog.debug(\"Extracted %d elements\" % total)\n\t\texcept:\n\t\t\tlog.error(\"Trouble extracting '%s'\" % src)\n\t\t\treturn False\n\t\treturn True", "def unpack_or_cp():\n if args.input_type == \"zip\":\n zip_out, zip_error = Popen([\"unzip\", args.input, \"-d\", args.out_folder.strip() + \"/fasta\"], stdout=PIPE,stderr=PIPE).communicate()\n admin_log(zip_out, zip_error)\n else:\n cp_out, cp_error = Popen([\"cp\", args.input, args.out_folder.strip() + \"/fasta\"], stdout=PIPE,stderr=PIPE).communicate()\n admin_log(cp_out, cp_error)", "def move_email(self, s3key, directory, date):\r\n dest_key = os.path.join(self.s3_base_dir, directory)\r\n \r\n if date is not None:\r\n dest_key = os.path.join(dest_key, date.strftime(\"%Y%m%d\"), os.path.basename(s3key))\r\n else:\r\n dest_key = os.path.join(dest_key, os.path.basename(s3key))\r\n if dest_key == s3key:\r\n return\r\n dest_key = dest_key.replace('\\\\','/')\r\n #logging.info(\"moving file\", repr(s3key), \"->\", repr(dest_key))\r\n self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key)\r\n self.client.delete_object(self.s3_bucket, s3key)", "def download_output_files(self):\n bucket_list = self.bucket.list(\"output/part\")\n for bucket_entry in bucket_list:\n key_string = str(bucket_entry.key)\n # check if file exists locally, if not: download it\n if not os.path.exists(key_string):\n bucket_entry.get_contents_to_filename(\"../\" + key_string)\n else:\n print \"output file already exists, please delete\"", "def _jobfile(self):\n job = self.job.format(fnum=self.fnum)\n with open(job, 'w') as f:\n f.write('#!/bin/sh\\n' + self.phast_cmmd + self.cleanup_cmmd)", "def _persist_output(self, output, dir):\r\n try:\r\n mkdirp(dir)\r\n filename = os.path.join(dir, 'output.pkl')\r\n numpy_pickle.dump(output, filename, compress=self.compress)\r\n if self._verbose > 10:\r\n print('Persisting in %s' % dir)\r\n except OSError:\r\n \" Race condition in the creation of the directory \"", "def download_results(self, output_dir, progress=None):\n\n if self._uuid is not None:\n self.update()\n\n if not path.exists(output_dir):\n makedirs(output_dir)\n\n if self._dirty:\n self.results.get_all_files(output_dir, progress=progress)", "def write_and_submit_job(setting, manager, job_name, script_address):\n backup_folder = script_address.rsplit('/', maxsplit=1)[0]\n 
job_script_folder = backup_folder + '/Jobs/'\n job_output_file = job_script_folder + 'output.txt'\n print(job_output_file)\n if not os.path.exists(job_script_folder):\n os.makedirs(job_script_folder)\n job_script_address = job_script_folder + 'jobscript_'+manager+'.sh'\n with open(job_script_address, \"w\") as string_file:\n if manager == 'OGE':\n string_file.write(sungrid.job_script(setting, job_name=job_name, script_address=script_address, job_output_folder=job_script_folder))\n elif manager == 'Slurm':\n string_file.write(slurm.job_script(setting, job_name=job_name, script_address=script_address, job_output_file=job_output_file))\n else:\n raise ValueError(\"manager should be in ['OGE', 'Slurm']\")\n string_file.close()\n for root, dir_list, file_list in os.walk(backup_folder):\n for f in dir_list+file_list:\n os.chmod(os.path.join(root, f), 0o754)\n if manager == 'OGE':\n submit_cmd = 'qsub ' + job_script_address\n elif manager == 'Slurm':\n submit_cmd = 'sbatch ' + job_script_address\n else:\n raise ValueError(\"manager should be in ['OGE', 'Slurm']\")\n os.system(submit_cmd)", "def make_dump_file_name(args: Namespace, wp_config: Dict, now: datetime) -> Location:\n\n if not args.snapshot_base_name:\n base_name = wp_config[\"db_name\"]\n else:\n base_name = args.snapshot_base_name\n\n name = args.file_name_template.format(base=base_name, time=now.isoformat() + \"Z\")\n\n return args.backup_dir.child(name)", "def transfer(self):\n\n # Upload unverified matches to s3 bucket if unverified argument used (production only)\n if self.in_args.unverified:\n files = glob.glob(os.path.join(self.directories['unverified_matches_dir'].format(self.region_dir, self.proc_type), '*'))\n\n # Loop through files found in unverified_matches folder\n for filepath in files:\n filename = os.path.basename(filepath)\n # Upload each file to S3 bucket folder\n self.upload_file(filepath, self.bucket, 'UK_suppliers/Unverified_Matches/' + filename)\n self.unverified_file = filename\n\n # Zip file creation - note will only work for latest unverified file. 
Above loop is added just incase\n # any residual files get added manually to S3 bucket.\n\n # Get filepaths of stats file, filtered and excluded matches files\n stats_fp = self.directories['stats_file'].format(self.region_dir, self.proc_type)\n filtered_matches_fp = self.directories['filtered_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n excluded_matches_fp = self.directories['excluded_matches'].format(self.region_dir, self.proc_type) + '_' + \\\n str(self.best_config) + '.csv'\n\n blacklisted_strings_fp = self.directories['blacklisted_string_matches'].format(self.region_dir)\n\n stats_file_fp = self.directories['script_performance_stats_file'].format(self.region_dir, self.proc_type)\n\n # Assign zip file which will contain above files\n files_zip = self.unverified_file[:10] + \"_files.zip\"\n\n with ZipFile(files_zip, 'w') as myzip:\n myzip.write(stats_fp, os.path.basename(stats_fp))\n myzip.write(filtered_matches_fp,os.path.basename(filtered_matches_fp))\n myzip.write(excluded_matches_fp, os.path.basename(excluded_matches_fp))\n myzip.write(blacklisted_strings_fp, os.path.basename(blacklisted_strings_fp))\n myzip.write(stats_file_fp, os.path.basename(stats_file_fp))\n\n self.upload_file(files_zip, self.bucket, 'UK_suppliers/Archive/' + files_zip)\n\n # Download verified matches from s3 bucket if verified argument (production only)\n if self.in_args.verified:\n self.process_verified_files()\n\n # Add confirmed matches/non-matches to training file\n if self.in_args.convert_training:\n self.runfile_mods.convert_training.ConvertToTraining.convert(self)", "def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: %s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n 
assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()", "def return_results(out, read_prefix, output_folder, temp_folder):\n # Make sure the output folder ends with a trailing slash\n if output_folder[-1] != \"/\":\n output_folder += \"/\"\n\n # Make a temporary file\n temp_fp = os.path.join(temp_folder, read_prefix + '.json')\n with open(temp_fp, 'wt') as fo:\n json.dump(out, fo)\n\n # Compress the output\n run_cmds(['gzip', temp_fp])\n temp_fp = temp_fp + '.gz'\n\n if output_folder.startswith('s3://'):\n # Copy to S3\n run_cmds([\n 'aws',\n 's3',\n 'cp',\n '--quiet',\n '--sse',\n 'AES256',\n temp_fp,\n output_folder])\n os.unlink(temp_fp)\n else:\n # Copy to local folder\n run_cmds(['mv', temp_fp, output_folder])", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def backup_data():\n\ttry:\n\t\tos.chdir(backup_directory)\n\texcept:\n\t\tprint(\"Backup folder does not exist!\")\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tos.chdir('..')\n\tfor directory in directories:\n\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\tprint(\"Backup complete!\")", "def execute(self, log_out, log_err):\n returncode = MG.execute(self, log_out, log_err)\n lhe_files = glob.glob(os.path.join(self.event_dir, \"*.lhe.gz\"))\n for f in lhe_files:\n dest = os.path.join(self.rundir, '%s_%s' % (self.name, os.path.basename(f)))\n logger.debug(\"Copying '%s' to '%s'\" % (f, dest))\n shutil.copy(f, dest)\n os.chdir(self.rundir)\n return returncode", "def backup_files(self):\n backup_path = os.path.join(self.backupdir, self.get_timestamp().replace(':', '-'))\n try:\n if not os.path.exists(backup_path):\n self.make_path(backup_path)\n if not os.path.exists(backup_path):\n raise IOError('Path was not made correctly')\n else:\n self.print_to_log('Backup path: %s' % backup_path)\n for item in self.file_list:\n try:\n self.print_to_log('Backing up file: %s' % item)\n shutil.copy(item, backup_path)\n except IOError, why:\n self.error = 2\n self.print_to_log(str(why))\n self.print_to_log('Unable to archive file: %s continuing' % item)\n except IOError, why:\n self.print_to_log(str(why))\n self.print_to_log('Quiting with out archiving')\n self.error = 1", "def download(url, output, encoding, insrs, format_name):\n\n folder = download_data(url, encoding)\n joined_file = join_files(folder)\n transform(joined_file, output, insrs, format_name)\n\n shutil.rmtree(folder)\n os.remove(joined_file)\n\n if not os.path.isfile(output):\n raise Error(\"Output file not created, the whole process failed\")\n else:\n logging.info(\"File %s successfuly created\" % output)", "def push_to_vospace(self, job):\n transfer = Transfer(job.jobInfo)\n # Loop checking for upload until 1 hour has passed\n starttime = datetime.now()\n location = self.sm.get_location(transfer.target)[0]['location']\n original = os.path.exists(location) and datetime.fromtimestamp(os.stat(location).st_mtime) or None\n while (datetime.now() - starttime).seconds / 3600 == 0:\n if os.path.exists(location):\n statinfo = 
os.stat(location)\n last_modified = datetime.fromtimestamp(statinfo.st_mtime)\n if last_modified != original and (datetime.now() - last_modified).seconds > 5: break\n sleep(1) \n # Log transfer as completed\n self.sm.complete_transfers(job.jobId) \n return None", "def backup_file(cloud, input_file, output_file):\n if cloud.find_one(path=output_file):\n return False\n\n print \"Backing up file:\", input_file, \"->\", output_file\n cloud.store_from_filename(input_file, output_file)\n\n return True", "def performIncrementalBackupOfEntry(self, backupEntry):\n time = datetime.datetime.now()\n # timeString = self.datetimeToString(time)\n timeString = self.organizer.datetimeToString(time)\n\n name = backupEntry.getName()\n compression = backupEntry.getCompressionType()\n fileExtension = backupEntry.getFilenameExtension()\n directory = backupEntry.getDirectory()\n directoryName = directory.strip('/')\n if directoryName.find('/') == -1:\n directoryName = '/'\n else:\n while True:\n ind = directoryName.find('/')\n if ind == -1:\n break\n directoryName = directoryName[ind + 1 :]\n\n tarFilename = name + '_' + timeString + '_' + self.incrementalBackupFilenameExtension + fileExtension\n tarFullFilename = os.path.join(self.backupDirectory, tarFilename)\n\n tarDict = {\n 'tar': '',\n 'gz': 'z',\n 'bz2': 'j',\n 'xz': 'J'\n }\n\n\n logging.info('Starting incremental backup of ' + directory + '.')\n try:\n lastFullBackupTime = self.organizer.getTimeOfLastFullBackup(backupEntry)\n except utils.NoBackupException:\n logging.error('Could not find a previous full backup of ' + directory + '. Aborting!')\n return\n lastFullBackupTimeString = self.organizer.datetimeToString(lastFullBackupTime)\n\n snarFilename = name + '_' + lastFullBackupTimeString + '.snar'\n snarFullFilename = os.path.join(self.backupDirectory, snarFilename)\n\n command = 'tar'\n commandOptions = ' --listed-increment ' + snarFullFilename + ' -c' + tarDict[compression] + 'pf ' + tarFullFilename + ' ' + directoryName\n commandString = command + commandOptions\n\n try:\n currentDir = os.getcwd()\n os.chdir(directory)\n os.chdir('..')\n process = subprocess.Popen(shlex.split(commandString), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n if stderr:\n logging.error('Executing tar resulted in an error.')\n logging.error(stderr)\n os.chdir(currentDir)\n logging.info('Successfully created incremental backup of ' + directory + ' and stored in ' + tarFullFilename + \\\n '. 
The corresponding snapshot was stored in ' + snarFullFilename + '.')\n\n except OSError as exception:\n logging.error('Exception occured: ' + str(exception))\n os.chdir(currentDir)\n except Exception as exception:\n logging.error('An unknown exception occured: ' + str(exception))\n os.chdir(currentDir)", "def save_output(self):\n # Auxiliary functions\n def intro(otype, suffix):\n self.logprint(\"Saving {}...\".format(otype))\n dirname = os.path.join(self.outpath,\\\n self.conf[\"output_prefix\"] + \"_files/{}\".format(suffix))\n if os.path.exists(dirname): # Overwrite existing output\n shutil.rmtree(dirname)\n os.makedirs(dirname)\n return(dirname)\n def save(obj, filename):\n try:\n f = open(filename, \"wb\")\n pickle.dump(obj, f)\n finally:\n f.close()\n def outro(otype): self.logprint(\"{} saved.\".format(otype).capitalize())\n # Saving output\n if self.conf[\"output_mode\"] >= 2: # Save all snapshot pops\n dirname = intro(\"snapshot populations\", \"populations/snapshots\")\n for n in xrange(self.conf[\"n_runs\"]):\n for m in xrange(self.conf[\"n_snapshots\"]):\n pop = self.runs[n].record[\"snapshot_pops\"][m]\n filename = dirname + \"/run{0}_s{1}.pop\".format(n,m)\n save(pop, filename)\n del self.runs[n].record[\"snapshot_pops\"]\n outro(\"snapshot populations\")\n if self.conf[\"output_mode\"] >= 1: # Save final populations\n dirname = intro(\"final populations\", \"populations/final\")\n for n in xrange(self.conf[\"n_runs\"]):\n pop = self.runs[n].record[\"final_pop\"]\n filename = dirname + \"/run{}.pop\".format(n)\n save(pop, filename)\n del self.runs[n].record[\"final_pop\"]\n outro(\"final populations\")\n if self.conf[\"output_mode\"] >= 0: # Save records\n dirname = intro(\"run records\", \"records\")\n for n in xrange(self.conf[\"n_runs\"]):\n rec = self.runs[n].record\n filename = dirname + \"/run{}.rec\".format(n)\n save(rec, filename)\n outro(\"run records\")", "def run(self):\n type = self.config.get('type', DEFAULT_BACKUP_TYPE)\n backup_dir = self.config.get('backup_dir', self.default_ongoing_backup_dir)\n archive = self.config.get('archive', False)\n only_postprocess = self.config.get('only_postprocess', False)\n compress = self.config.get('compress', False)\n rotate = self.config.get('rotate', False)\n threads = self.config.get('threads', DEFAULT_BACKUP_THREADS)\n\n # find or generate the backup file/dir\n if only_postprocess:\n if self.name.startswith('/'): # if passed an absolute path as section name\n # basedir doesn't work as intended if passed /a/path/like/this/\n backup_dir = os.path.normpath(os.path.join(self.name, '..'))\n self.parse_backup_file()\n else:\n self.find_backup_file(backup_dir)\n if self.file_name is None:\n msg = 'Problem while trying to find the backup files at %s'\n self.logger.error(msg, backup_dir)\n return 10\n else:\n self.generate_file_name(backup_dir)\n\n output_dir = os.path.join(backup_dir, self.dir_name)\n if type == 'dump':\n backup = MyDumperBackup(self.config, self)\n elif type == 'snapshot':\n backup = MariaBackup(self.config, self)\n elif type == 'null':\n backup = NullBackup(self.config, self)\n else:\n self.logger.error('Unrecognized backup format: %s', type)\n return 11\n\n # get the backup command\n if not only_postprocess:\n cmd = backup.get_backup_cmd(backup_dir)\n\n # start status monitoring\n if 'statistics' in self.config: # Enable statistics gathering?\n source = self.config.get('host', 'localhost') + \\\n ':' + \\\n str(self.config.get('port', DEFAULT_PORT))\n stats = DatabaseBackupStatistics(dir_name=self.dir_name, 
section=self.name,\n type=type, config=self.config.get('statistics'),\n backup_dir=output_dir, source=source)\n else:\n stats = DisabledBackupStatistics()\n\n stats.start()\n\n if not only_postprocess:\n # run backup command\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_output(out, err):\n stats.fail()\n return 3\n\n # Check log for errors\n if backup.errors_on_log():\n self.logger.error('Error log found at %s', self.log_file)\n stats.fail()\n return 4\n\n # Check medatada file exists and containg the finish date\n if backup.errors_on_metadata(backup_dir):\n self.logger.error('Incorrect metadata file')\n stats.fail()\n return 5\n\n # Backups seems ok, prepare it for recovery and cleanup\n try:\n cmd = backup.get_prepare_cmd(backup_dir)\n except BackupException as ex:\n self.logger.error(str(ex))\n stats.fail()\n return 13\n if cmd != '':\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_prepare(out, err):\n self.logger.error('The mariabackup prepare process did not complete successfully')\n stats.fail()\n return 6\n\n # get file statistics\n stats.gather_metrics()\n\n if archive:\n backup.archive_databases(output_dir, threads)\n\n if compress:\n # no consolidation per-db, just compress the whole thing\n result = self.tar_and_remove(backup_dir, self.file_name, [self.dir_name, ],\n compression='/usr/bin/pigz -p {}'.format(threads))\n if result != 0:\n self.logger.error('The compression process failed')\n stats.fail()\n return 11\n\n if rotate:\n # perform rotations\n # move the old latest one to the archive, and the current as the latest\n # then delete old backups of the same section, according to the retention\n # config\n result = self.move_backups(self.name, self.default_final_backup_dir,\n self.default_archive_backup_dir, self.name_regex)\n if result != 0:\n self.logger.warning('Archiving backups failed')\n result = self.os_rename(os.path.join(backup_dir, self.file_name),\n os.path.join(self.default_final_backup_dir, self.file_name))\n if result != 0:\n self.logger.error('Moving backup to final dir failed')\n stats.fail()\n return 12\n result = self.purge_backups()\n if result != 0:\n self.logger.warning('Purging old backups failed')\n\n # we are done\n stats.finish()\n return 0", "def _get_job_commands(self,\r\n fasta_fps,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n working_dir,\r\n command_prefix='/bin/bash; ',\r\n command_suffix='; exit'):\r\n # Create basenames for each of the output files. These will be filled\r\n # in to create the full list of files created by all of the runs.\r\n out_filenames = [job_prefix + '.%d_aligned.fasta',\r\n job_prefix + '.%d_failures.fasta',\r\n job_prefix + '.%d_log.txt']\r\n\r\n # Initialize the command_prefix and command_suffix\r\n command_prefix = command_prefix or '/bin/bash; '\r\n command_suffix = command_suffix or '; exit'\r\n\r\n # Create lists to store the results\r\n commands = []\r\n result_filepaths = []\r\n\r\n # If there is a value for blast_db, pass it. If not, it\r\n # will be created on-the-fly. 
Note that on-the-fly blast dbs\r\n # are created with a string of random chars in the name, so this is safe.\r\n # They shouldn't overwrite one another, and will be cleaned up.\r\n if params['blast_db']:\r\n blast_str = '-d %s' % params['blast_db']\r\n else:\r\n blast_str = ''\r\n\r\n # Iterate over the input files\r\n for i, fasta_fp in enumerate(fasta_fps):\r\n # Each run ends with moving the output file from the tmp dir to\r\n # the output_dir. Build the command to perform the move here.\r\n rename_command, current_result_filepaths = self._get_rename_command(\r\n [fn % i for fn in out_filenames], working_dir, output_dir)\r\n result_filepaths += current_result_filepaths\r\n\r\n command = \\\r\n '%s %s %s -p %1.2f -e %d -m pynast -t %s -a %s -o %s -i %s %s %s' %\\\r\n (command_prefix,\r\n self._script_name,\r\n blast_str,\r\n params['min_percent_id'],\r\n params['min_length'],\r\n params['template_fp'],\r\n params['pairwise_alignment_method'],\r\n working_dir,\r\n fasta_fp,\r\n rename_command,\r\n command_suffix)\r\n\r\n commands.append(command)\r\n\r\n return commands, result_filepaths", "def finish (self):\n if (self.testRun):\n return\n if (self.lastRunFile is None):\n return\n try:\n lrf = open (self.lastRunFile, 'w')\n lrf.write (time.asctime (self.backupStarted))\n lrf.close()\n except Exception as e:\n raise Exception (\"Error writing to last run file!\")", "def copy_results(self):\n for k in self.buck_map.keys():\n copyfile(pjoin(self.wdir, f'{k}.nii.gz'), pjoin(self.outdir, f'{k}.nii.gz'))\n for prefix in ['snfit.PRF+orig', 'Buck.PRF+orig']:\n for suffix in ['HEAD', 'BRIK']:\n copyfile(pjoin(self.wdir, \".\".join([prefix, suffix])), pjoin(self.outdir, \".\".join([prefix, suffix])))", "def resolve_backup_target(self):\n\n response = self.http_client.get(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND isRoot:true')\n parent_node_id = response.json()['data'][0]['id']\n\n for component in [x for x in self.backup_target.split('/') if x]:\n # There doesn't seem to be escaping support, so cut off filter\n # after first unsupported character\n query = re.search('^[A-Za-z0-9_-]*', component).group(0)\n if component != query:\n query = query + '*'\n\n matches = self.read_all_pages(\n self.metadata_url + 'nodes?filters=kind:FOLDER AND name:%s '\n 'AND parents:%s' % (query, parent_node_id))\n candidates = [f for f in matches if f.get('name') == component]\n\n if len(candidates) >= 2:\n log.FatalError('There are multiple folders with the same name '\n 'below one parent.\\nParentID: %s\\nFolderName: '\n '%s' % (parent_node_id, component))\n elif len(candidates) == 1:\n parent_node_id = candidates[0]['id']\n else:\n log.Debug('Folder %s does not exist yet. Creating.' 
% component)\n parent_node_id = self.mkdir(parent_node_id, component)\n\n log.Debug(\"Backup target folder has id: %s\" % parent_node_id)\n self.backup_target_id = parent_node_id", "def job_download(self, job_id):\n\n target = QFileDialog.getExistingDirectory(self, 'Where to save the resulting files?')\n if target:\n paths = self.backend.job_result_download(job_id, target)\n info(self.iface, \"Successfully Downloaded to {}\".format(paths))", "def wsRenameOutput(self, nj):\n\n txt = '\\n#Written by cms_cmssw::wsRenameOutput\\n'\n txt += 'echo \">>> current directory $PWD\" \\n'\n txt += 'echo \">>> (SOFTWARE_DIR): $SOFTWARE_DIR\" \\n'\n txt += 'echo \">>> (WORKING_DIR): $WORKING_DIR\" \\n'\n txt += 'echo \">>> current directory content:\"\\n'\n #if self.debug_wrapper==1:\n txt += 'ls -Al\\n'\n txt += '\\n'\n\n for fileWithSuffix in (self.output_file):\n output_file_num = numberFile(fileWithSuffix, '$OutUniqueID')\n txt += '\\n'\n txt += '# check output file\\n'\n txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\\n'\n if (self.copy_data == 1): # For OSG nodes, file is in $WORKING_DIR, should not be moved to $RUNTIME_AREA\n txt += ' mv '+fileWithSuffix+' '+output_file_num+'\\n'\n txt += ' ln -s `pwd`/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\\n'\n else:\n txt += ' mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\\n'\n txt += ' ln -s $RUNTIME_AREA/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\\n'\n txt += 'else\\n'\n txt += ' job_exit_code=60302\\n'\n txt += ' echo \"WARNING: Output file '+fileWithSuffix+' not found\"\\n'\n if common.scheduler.name().upper() == 'CONDOR_G':\n txt += ' if [ $middleware == OSG ]; then \\n'\n txt += ' echo \"prepare dummy output file\"\\n'\n txt += ' echo \"Processing of job output failed\" > $RUNTIME_AREA/'+output_file_num+'\\n'\n txt += ' fi \\n'\n txt += 'fi\\n'\n file_list = []\n for fileWithSuffix in (self.output_file):\n file_list.append(numberFile('$SOFTWARE_DIR/'+fileWithSuffix, '$OutUniqueID'))\n\n txt += 'file_list=\"'+string.join(file_list,',')+'\"\\n'\n txt += '\\n'\n txt += 'echo \">>> current directory $PWD\" \\n'\n txt += 'echo \">>> (SOFTWARE_DIR): $SOFTWARE_DIR\" \\n'\n txt += 'echo \">>> (WORKING_DIR): $WORKING_DIR\" \\n'\n txt += 'echo \">>> current directory content:\"\\n'\n #if self.debug_wrapper==1:\n txt += 'ls -Al\\n'\n txt += '\\n'\n txt += 'cd $RUNTIME_AREA\\n'\n txt += 'echo \">>> current directory (RUNTIME_AREA): $RUNTIME_AREA\"\\n'\n return txt", "def do_after_job(self, item, dump_items):\n self.checksummer.cp_chksum_tmpfiles_to_permfile()\n # this will include checkpoint files if they are enabled.\n for dfname in item.oflister.list_outfiles_to_publish(item.oflister.makeargs(self.dump_dir)):\n if os.path.exists(self.dump_dir.filename_public_path(dfname)):\n # why would the file not exist? 
because we changed number of file parts in the\n # middle of a run, and now we list more files for the next stage than there\n # were for earlier ones\n self.symlinks.save_symlink(dfname)\n self.feeds.save_feed(dfname)\n self.checksummer.checksums(dfname, self)\n self.symlinks.cleanup_symlinks()\n self.feeds.cleanup_feeds()\n self.runinfo.save_dump_runinfo(\n RunInfo.report_dump_runinfo(dump_items))", "def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)", "def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')", "def execute(self, log_out, log_err):\n returncode = MG.execute(self, log_out, log_err)\n lhe_files = glob.glob(os.path.join(self.event_dir, \"*.lhe.gz\"))\n for f in lhe_files:\n dest = os.path.join(self.rundir, os.path.basename(f))\n logger.debug(\"Copying '%s' to '%s'\" % (f, dest))\n shutil.copy(f, dest)\n os.chdir(self.rundir)\n return returncode", "def url_to_destination( self, url ):\n return JobDestination( runner=\"lwr\", params=url_to_destination_params( url ) )", "def export(self, location):\n temp_dir = tempfile.mkdtemp('-export', 'pip-')\n self.unpack(temp_dir)\n try:\n call_subprocess(\n [self.cmd, 'archive', location],\n filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)\n finally:\n rmtree(temp_dir)", "def replace_gen(self):\r\n current_path = os.path.join(self.settings.save_path, 'current.json')\r\n current_folder_path = os.path.join(self.settings.save_path, 'current')\r\n history_path = os.path.join(self.settings.save_path, 'history')\r\n archive_folder_path = os.path.join(history_path, f'gen{self.generation}')\r\n archive_path = os.path.join(archive_folder_path, 'current') # no ending allowed\r\n archive_json_path = os.path.join(archive_folder_path, 'current.json')\r\n\r\n\r\n if not os.path.exists(current_path):\r\n raise FileNotFoundError\r\n if not os.path.exists(current_folder_path):\r\n raise FileNotFoundError\r\n\r\n os.makedirs(history_path, exist_ok=True)\r\n os.makedirs(archive_folder_path)\r\n\r\n cwd = os.getcwd()\r\n shutil.make_archive(archive_path, 'zip', current_folder_path)\r\n os.chdir(cwd)\r\n shutil.rmtree(current_folder_path, onerror=_ignore_del_dir_failure)\r\n os.chdir(cwd)\r\n\r\n os.rename(current_path, archive_json_path)", "def worker(self, move_job):\n try:\n # Need o handle a list of sources\n self._logger.info(\"Starting move for job with id %s\", move_job.id)\n move_job.update(status=MoveJob.STATUS_IN_PROGRESS,\n start_timestamp=datetime.datetime.now())\n\n # source can be just 1 source or a list of sources\n if isinstance(move_job.source, str):\n sourcelist = [move_job.source]\n elif isinstance(move_job.source, list):\n sourcelist = move_job.source\n else:\n raise Exception('Invalid source {1}'.format(move_job.source))\n\n # Validate the destination url\n dest_url = urlparse(move_job.destination)\n if dest_url.scheme in ('swift+http', 'swift+https') and not self._has_credential():\n raise Exception('Credential for Nectar swift service is not configured.')\n\n # Download all the files from the sources to the destination\n destination = build_destination(move_job.destination, self._config)\n\n for s in sourcelist:\n source = build_source(s, move_job.userid, self._config)\n movelib.move(source, destination)\n move_job.update(status=MoveJob.STATUS_COMPLETE, 
start_timestamp=datetime.datetime.now())\n except Exception as e:\n # catch any Exception here so that we can properly update the job state\n reason = 'Move has failed for job with id {0}. Reason: {1}'.format(move_job.id, str(e))\n self._logger.warning(reason)\n move_job.update(status=MoveJob.STATUS_FAILED, end_timestamp=datetime.datetime.now(), reason=reason)", "def execute(self, log_out, log_err):\n EventGenerator.execute(self, log_out, log_err)\n if 'moller' not in self.name:\n src = os.path.join(self.rundir, 'brems.stdhep')\n dest = os.path.join(self.rundir, self.output_files()[0])\n logger.debug(\"Copying '%s' to '%s'\" % (src, dest))\n shutil.copy(src, dest)", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def copyziplatest_ap():\n\n #Argparse:{{{\n import argparse\n \n parser=argparse.ArgumentParser()\n parser.add_argument(\"sourceroot\", help = \"the folder where the latest folder is copied from i.e. regbackup/code/M5/\")\n parser.add_argument(\"destroot\", help = \"the folder where the zip will be created\")\n parser.add_argument(\"--maxbackups\", type = int, help = \"the maximum number of backups. Once this is reached, old backups are deleted when new ones are created.\")\n \n args=parser.parse_args()\n #End argparse:}}}\n\n copyziplatest(args.sourceroot, args.destroot, maxbackups = args.maxbackups)", "def backup_file(self):\n _backupO = Backups()\n _backupFilename = _backupO.backup_file(self.job[\"JSONfileToBeEdited\"])\n return 'Original file %s backed up to %s' % (\n self.job[\"JSONfileToBeEdited\"], _backupFilename)", "def move_and_record( three_paths, suffix ):\n global conn, curs, dryrun, std_file_perms, std_dir_perms\n moved_datasets = []\n split_datasets = []\n nosrc_datasets = []\n failed_datasets = []\n if dryrun:\n for scrv,vnh,epbv in three_paths:\n print \"would move\",scrv,\"to\",epbv\n else:\n for scrv,vnh,epbv in three_paths:\n try:\n os.renames(scrv,epbv)\n chgrp_perms_updown( epbv, group='climatew' )\n logging.info( \"moved %s to %s\" % (scrv,epbv) )\n moved_datasets.append( (vnh,epbv) )\n except Exception as e:\n logging.warning( \"could not move %s to %s due to %s\" % (scrv,epbv,e) )\n if (os.path.isdir(epbv) and len(os.listdir(epbv))>0 and (\n not os.path.isdir(scrv)) or len(os.listdir(scrv))==0):\n # data have already been moved; nothing left here\n moved_datasets.append( (vnh,epbv) )\n logging.info(\"data is already in %s\" % epbv)\n elif os.path.isdir(epbv) and len(os.listdir(epbv))>0 and\\\n os.path.isdir(scrv) and len(os.listdir(scrv))>0:\n # data in both directories. 
Probably the dataset was changed\n split_datasets.append( vnh )\n logging.info(\"data is in both %s and %s\" % (scrv,epbv) )\n elif not os.path.isdir(scrv):\n # The source doesn't exist, and it hasn't already been (substantively) moved.\n nosrc_datasets.append( vnh )\n logging.info(\"source %s does not exist\" % scrv )\n else:\n # don't know what's wrong, could be a permissions problem for making epbv\n failed_datasets.append( vnh )\n logging.info(\"unknown problem with moving data %s\" %vnh )\n\n # Write a file listing the new locations of the new complete datasets.\n # To prevent premature processing of the file, it will be written to /tmp, permissions limited,\n # and then moved. Finally permissions will be relaxed.\n # Write another file listing any new complete datasets which could not be moved. This duplicates\n # logging information, but is convenient input for a future second try.\n tmpfile = \"/tmp/datasets_since_%s\" % suffix\n outfile = \"/p/user_pub/publish-queue/CMIP6-list-todo/\"+os.path.basename(tmpfile)\n nosrcfile = \"/p/css03/scratch/logs/nosrc_datasets_%s\" % suffix\n splitfile = \"/p/css03/scratch/logs/split_datasets_%s\" % suffix\n failfile = \"/p/css03/scratch/logs/unmoved_datasets_%s\" % suffix\n with open( tmpfile, 'w' ) as f:\n for path in [ pp[1] for pp in moved_datasets ]:\n f.write( \"%s\\n\" % path )\n # owner only can read/write tmpfile:\n chgrp_perms( tmpfile, group='painter', permissions=(stat.S_IRUSR | stat.S_IWUSR ))\n if not dryrun:\n shutil.copy( tmpfile, os.path.dirname(failfile) ) # moved files; for logging\n shutil.move( tmpfile, os.path.dirname(outfile) ) # moved files; for publishing\n chgrp_perms( outfile, group='climatew', permissions=std_file_perms )\n with open( splitfile, 'w' ) as f:\n for path in [ p for p in split_datasets ]:\n f.write( \"%s\\n\" % path )\n with open( nosrcfile, 'w' ) as f:\n for path in [ p for p in nosrc_datasets ]:\n f.write( \"%s\\n\" % path )\n with open( failfile, 'w' ) as f:\n for path in [ p for p in failed_datasets ]:\n f.write( \"%s\\n\" % path )", "def job_delete(job):\n\n if os.path.exists(job.output_abspath):\n os.remove(job.output_abspath)\n db.session.delete(job)\n db.session.commit()", "def _backupLog(self, updateText):\n \taFile = \"archiving_log.txt\"\n \tos.rename( aFile, aFile+\"~\")\n \tdestination= open( aFile, \"w\" )\n \tsource= open( aFile+\"~\", \"r\" )\n \tfor line in source:\n \t\tdestination.write( line )\n \tdestination.write( str(updateText))\n \tsource.close()\n \tdestination.close()\n \tos.remove(aFile+\"~\")", "def rollback(folder_name, with_subfolders):\n process_backups(folder_name, with_subfolders, lambda x: copy2(x, x[:-4]))", "def save(self, prefix, replace=False):\n if not _check_prefix(prefix):\n return\n\n # Create the results directory\n try:\n os.makedirs(prefix)\n except OSError:\n if not replace:\n print('Error directory already exists: %s' % prefix,\n file=stderr)\n return\n elif prefix.startswith('rpl_'):\n shutil.rmtree(prefix)\n os.makedirs(prefix)\n else:\n print('Error only directries begining with \"rpl_\" can be'\n 'replaced', file=stderr)\n return\n\n # Moves all the files into it\n shutil.copy(self._bnd, prefix+'/%s.bnd' % os.path.basename(prefix))\n shutil.copy(self._cfg, prefix+'/%s.cfg' % os.path.basename(prefix))\n\n maboss_files = filter(lambda x: x.startswith(self.prefix),\n os.listdir(self._path))\n for f in maboss_files:\n shutil.copy(self._path + '/' + f, prefix)", "def moveBigFiles(self):\n if not self.bigFilesArea:\n self.logger.info('Moving of big files to a 
separate volume has not been requested.')\n return\n\n self.logger.info('Moving of big files to a separate volume is requested. Scanning...')\n \n if not os.path.exists(self.bigFilesArea):\n m = 'Cannot shift big files onto inexistent volume: %s' % self.bigFilesArea\n self.logger.error(m)\n return\n \n bigFiles = self.getBigFiles()\n\n if not [val for val in bigFiles.values() if val]:\n self.logger.info('No big files were found, returning.')\n return\n \n placeToDump = createBigFileIO(self.site, self.bigFilesArea, self.workDirs, self.isNightly).getJobDumpLocation(self)\n if not placeToDump:\n m = 'Unable to retrieve location of big files volume. Not moving big files.'\n self.logger.warning(m)\n return\n\n # We have files to move, let's move them\n for bigFileBaseDir, bigFiles in bigFiles.items():\n for bigFile in bigFiles:\n src = bigFile # file\n dst = placeToDump # directory\n self.moveBigFile(src, dst)\n # If big file origin is results path, replace with a soft link\n # to separate big file volume.\n if bigFileBaseDir == self.resPath:\n self.makeReplacementKeepFile(bigFile, placeToDump)", "def _get_job_commands(self,\r\n fasta_fps,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n working_dir,\r\n command_prefix='/bin/bash; ',\r\n command_suffix='; exit'):\r\n # Create basenames for each of the output files. These will be filled\r\n # in to create the full list of files created by all of the runs.\r\n out_filenames = [job_prefix + '.%d_otus.log',\r\n job_prefix + '.%d_otus.txt',\r\n job_prefix + '.%s_failures.txt']\r\n\r\n # Create lists to store the results\r\n commands = []\r\n result_filepaths = []\r\n\r\n if params['enable_rev_strand_match']:\r\n enable_rev_strand_match_str = '-z'\r\n else:\r\n enable_rev_strand_match_str = ''\r\n\r\n if params['optimal_uclust']:\r\n optimal_uclust_str = '-A'\r\n else:\r\n optimal_uclust_str = ''\r\n\r\n if params['exact_uclust']:\r\n exact_uclust_str = '-E'\r\n else:\r\n exact_uclust_str = ''\r\n\r\n if params['stable_sort']:\r\n stable_sort_str = ''\r\n else:\r\n stable_sort_str = '--suppress_uclust_stable_sort'\r\n\r\n if params['save_uc_files']:\r\n save_uc_files_str = ''\r\n out_filenames += [job_prefix + '.%d_clusters.uc']\r\n else:\r\n save_uc_files_str = '-d'\r\n\r\n # Iterate over the input files\r\n for i, fasta_fp in enumerate(fasta_fps):\r\n # Each run ends with moving the output file from the tmp dir to\r\n # the output_dir. 
Build the command to perform the move here.\r\n rename_command, current_result_filepaths = self._get_rename_command(\r\n [fn % i for fn in out_filenames],\r\n working_dir,\r\n output_dir)\r\n result_filepaths += current_result_filepaths\r\n\r\n command = \\\r\n '%s %s -i %s -r %s -m uclust_ref --suppress_new_clusters -o %s -s %s %s %s %s --max_accepts %s --max_rejects %s --stepwords %d --w %d %s %s %s %s' %\\\r\n (command_prefix,\r\n self._script_name,\r\n fasta_fp,\r\n params['refseqs_fp'],\r\n working_dir,\r\n params['similarity'],\r\n enable_rev_strand_match_str,\r\n optimal_uclust_str,\r\n exact_uclust_str,\r\n params['max_accepts'],\r\n params['max_rejects'],\r\n params['stepwords'],\r\n params['word_length'],\r\n stable_sort_str,\r\n save_uc_files_str,\r\n rename_command,\r\n command_suffix)\r\n\r\n commands.append(command)\r\n\r\n return commands, result_filepaths", "def backup_directory(cloud, input_file, output_file):\n if cloud.find_one(path=output_file):\n return False\n\n for root, dirnames, filenames in os.walk(input_file):\n for filename in filenames:\n filename = root + \"/\" + filename\n if filename.startswith(\"./\"):\n filename = filename[2:]\n if output_file != input_file:\n cloud_file = os.path.normpath(output_file + \"/\" + filename)\n else:\n cloud_file = filename\n if not backup_file(cloud, filename, cloud_file):\n print \"File already exists: {0}\".format(cloud_file)\n\n return True", "def task():\n if os.path.isdir(orig):\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*')) if \\\n os.path.isdir(fP) ]:\n if not os.path.exists(dest + fP[len(orig):]):\n os.makedirs(dest + fP[len(orig):])\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*/%s.log' %fmt.get_date())) if \\\n os.path.isfile(fP) ]:\n convert(fP, dest + fP[len(orig):])", "def archiveJob(appName, jobId):\n job_id = str(jobId).encode('utf8', 'ignore')\n if job_id.find('.') > 0:\n job_id = job_id.split('.')[0]\n jobs = db.getJobs(jobId=job_id)\n job = None if len(jobs) == 0 else jobs[0]\n if job == None:\n return returnError (\"Job ID, %s, does not exist\" % job_id, 404)\n\n if request.method == 'POST':\n file = request.files['file']\n if file:\n filename = secure_filename(file.filename)\n path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, job_id, filename).encode(encoding='utf8', errors='ignore')\n file.save(path)\n return \"File Uploaded & archived\", 202\n else:\n return \"No file received\", 400\n\n elif request.method == 'GET':\n return '''\nUpload your file using the following CURL command:\\n\\n\n curl -i -X POST -H \"Accept: application/json\" -F file=@<filename> http://<server>:<port>/<appName>/<jobId>/archive\n''', 200", "def _write_to_tar(self, dtype_out_time):\n # When submitted in parallel and the directory does not exist yet\n # multiple processes may try to create a new directory; this leads\n # to an OSError for all processes that tried to make the\n # directory, but were later than the first.\n try:\n os.makedirs(self.dir_tar_out)\n except OSError:\n pass\n # tarfile 'append' mode won't overwrite the old file, which we want.\n # So open in 'read' mode, extract the file, and then delete it.\n # But 'read' mode throws OSError if file doesn't exist: make it first.\n utils.io.dmget([self.path_tar_out])\n with tarfile.open(self.path_tar_out, 'a') as tar:\n pass\n with tarfile.open(self.path_tar_out, 'r') as tar:\n old_data_path = os.path.join(self.dir_tar_out,\n self.file_name[dtype_out_time])\n try:\n tar.extract(self.file_name[dtype_out_time],\n 
path=old_data_path)\n except KeyError:\n pass\n else:\n # The os module treats files on archive as non-empty\n # directories, so can't use os.remove or os.rmdir.\n shutil.rmtree(old_data_path)\n retcode = subprocess.call([\n \"tar\", \"--delete\", \"--file={}\".format(self.path_tar_out),\n self.file_name[dtype_out_time]\n ])\n if retcode:\n msg = (\"The 'tar' command to save your aospy output \"\n \"exited with an error. Most likely, this is due \"\n \"to using an old version of 'tar' (especially if \"\n \"you are on a Mac). Consider installing a newer \"\n \"version of 'tar' or disabling tar output by \"\n \"setting `write_to_tar=False` in the \"\n \"`calc_exec_options` argument of \"\n \"`submit_mult_calcs`.\")\n logging.warn(msg)\n with tarfile.open(self.path_tar_out, 'a') as tar:\n tar.add(self.path_out[dtype_out_time],\n arcname=self.file_name[dtype_out_time])", "def _backup_chunk(self, backup, container, data, data_offset,\n object_meta, extra_metadata):\n object_prefix = object_meta['prefix']\n object_list = object_meta['list']\n\n object_id = object_meta['id']\n object_name = '%s-%05d' % (object_prefix, object_id)\n obj = {}\n obj[object_name] = {}\n obj[object_name]['offset'] = data_offset\n obj[object_name]['length'] = len(data)\n LOG.debug('reading chunk of data from volume')\n if self.compressor is not None:\n algorithm = CONF.backup_compression_algorithm.lower()\n obj[object_name]['compression'] = algorithm\n data_size_bytes = len(data)\n data = self.compressor.compress(data)\n comp_size_bytes = len(data)\n LOG.debug('compressed %(data_size_bytes)d bytes of data '\n 'to %(comp_size_bytes)d bytes using '\n '%(algorithm)s',\n {\n 'data_size_bytes': data_size_bytes,\n 'comp_size_bytes': comp_size_bytes,\n 'algorithm': algorithm,\n })\n else:\n LOG.debug('not compressing data')\n obj[object_name]['compression'] = 'none'\n\n LOG.debug('About to put_object')\n write_length_bytes = len(data)\n with self.get_object_writer(\n container, object_name, extra_metadata=extra_metadata\n ) as writer:\n writer.write(data)\n md5 = hashlib.md5(data).hexdigest()\n obj[object_name]['md5'] = md5\n LOG.debug('backup MD5 for %(object_name)s: %(md5)s',\n {'object_name': object_name, 'md5': md5})\n object_list.append(obj)\n object_id += 1\n object_meta['list'] = object_list\n object_meta['id'] = object_id\n\n LOG.debug('Calling eventlet.sleep(0)')\n eventlet.sleep(0)\n return write_length_bytes" ]
[ "0.63789195", "0.62199014", "0.614267", "0.6113023", "0.60661507", "0.6045541", "0.5890307", "0.5839137", "0.5829081", "0.58237654", "0.57699376", "0.57484376", "0.5743507", "0.57335913", "0.570491", "0.56820357", "0.5673654", "0.5669085", "0.5664019", "0.5630723", "0.56222504", "0.56179196", "0.5609798", "0.5603987", "0.5602764", "0.5594667", "0.55578125", "0.5524995", "0.55235136", "0.55218583", "0.5518489", "0.5507869", "0.54810137", "0.54761153", "0.5464383", "0.5456445", "0.5419878", "0.54142255", "0.54080874", "0.54013944", "0.5369688", "0.5352121", "0.53438365", "0.53386426", "0.5336736", "0.5334122", "0.5301527", "0.52998483", "0.52767444", "0.5272936", "0.5272356", "0.5265893", "0.5265863", "0.5249562", "0.5248862", "0.5246116", "0.5240053", "0.5232362", "0.523221", "0.5232177", "0.5232005", "0.523143", "0.52217096", "0.52174634", "0.5204749", "0.51967067", "0.51857966", "0.51849115", "0.5184609", "0.5181961", "0.5181314", "0.518114", "0.51771176", "0.517487", "0.51646394", "0.5162095", "0.5159607", "0.5153523", "0.5151427", "0.51511806", "0.5144643", "0.5144263", "0.51348805", "0.5126789", "0.51257265", "0.51131934", "0.51096505", "0.51075083", "0.51072794", "0.51065326", "0.5094919", "0.5088191", "0.50865465", "0.50822735", "0.50704294", "0.5061005", "0.50585485", "0.50539935", "0.50538045", "0.5053612" ]
0.60767967
4
Split out the data set into its inputs and its labels.
def _split_inputs_outputs(self, data):

        inputs = []
        outputs = []

        for point in data:
            inputs.append(point[0])
            outputs.append(point[1])

        return np.array(inputs), np.array(outputs)
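For context, a minimal runnable sketch of the same input/label split follows. The standalone function, the toy (feature, label) pairs, and the printed shapes are illustrative assumptions and not part of the dataset record, where the split is a method on a larger class.

import numpy as np


def split_inputs_outputs(data):
    # Split an iterable of (input, label) pairs into two parallel NumPy arrays.
    inputs = []
    outputs = []
    for point in data:
        inputs.append(point[0])   # feature vector
        outputs.append(point[1])  # label
    return np.array(inputs), np.array(outputs)


if __name__ == "__main__":
    # Three toy points, each a (feature_vector, label) pair (assumed sample data).
    data = [([0.1, 0.2], 0), ([0.3, 0.4], 1), ([0.5, 0.6], 0)]
    X, y = split_inputs_outputs(data)
    print(X.shape, y.shape)  # -> (3, 2) (3,)

Returning two parallel arrays keeps the result directly usable by the train/validation/test splitting helpers that appear among the negatives below.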
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def split_data_set_splitted(X, Y):\n\n # Uso la funcion de scikitlearn para separar el data_set\n # Esta funcion por defecto mezcla los datos para asegurar la representacion\n # de los datos en los dos subconjuntos\n #\n # Blanca Cano Camarero me comenta que ponga el stratify = Y porque asi se lo\n # indica el profesor Pablo Mesejo en una consulta realizada. 
En la referencia\n # que indico de scikitlearn tambien viene documentado este parametro\n # Lo que hace es evitar que haya clases que queden infrarepresentadas\n X_training, X_test, Y_training, Y_test= train_test_split(X, Y, train_size = 0.75, test_size = 0.25, stratify = Y)\n return X_training, X_test, Y_training, Y_test", "def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def split(self, X):", "def preprocess_split(self, input_dataset, last_id, num_sents, max_sent_len, prefix_id = \"\"):\n dataset = []\n for sent in input_dataset[last_id:]:\n last_id += 1\n if type(sent) == tuple or len(sent) > max_sent_len or len(sent) <= 1:\n continue\n dataset.append(self.preprocess_sent(sent, prefix_id + str(len(dataset))))\n if len(dataset) == num_sents:\n break\n\n return dataset, last_id", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data(self):\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n\n return X_train, X_test, y_train, y_test", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices", "def split_data_set_matrix(data_set):\n\n # Uso la funcion de scikitlearn para separar el data_set\n # Esta funcion por defecto mezcla los datos para asegurar la representacion\n # de los datos en los dos subconjuntos\n training, test = train_test_split(data_set, train_size = 0.75, test_size = 0.25)\n return training, test", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def split_data(data, labels, val_size):\n # Shuffle index\n index = np.random.permutation(len(data))\n\n # Split into Datasets\n X_val = data[index][-val_size:]\n X_train = data[index][:-val_size]\n y_val = labels[index][-val_size:].ravel()\n y_train = 
labels[index][:-val_size].ravel()\n\n return X_train, X_val, y_train, y_val", "def split_data(X:np.ndarray, y:np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n \n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)\n \n return X_train, X_val, y_train, y_val", "def read_in_and_split_data(iris_data):\n iris_data = datasets.load_iris()\n data = iris_data['data']\n targets = iris_data['target']\n train_data, test_data, train_targets, test_targets = train_test_split(data, targets, test_size=0.1) \n return (train_data, test_data, train_targets, test_targets)", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon", "def dataset_input_fn(self, train_batch_size, split):\n self._create_tf_datasets(split, train_batch_size)\n next_el = self.iterator.get_next()\n obj_ids, translations, rotations, labels, is_augmented = next_el\n points, segment_ids = self._input_fn(obj_ids, translations,\n rotations, train_batch_size,\n train_batch_size * 6, True)\n return (points, segment_ids, labels, is_augmented)", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def split(self,X,y=None):\n all_idx = pd.Series(np.arange(X.shape[0])) \n mbrg = int(X.shape[0]*self.embargo_pct)\n test_starts=[(i[0],i[-1]+1) for i in np.array_split(all_idx.values,self.n_splits)]\n for i, j in test_starts:\n t0 = all_idx.index[i] # start of test set\n test_indices = all_idx.values[i:j]\n maxT1Idx = all_idx.index.searchsorted(all_idx[test_indices].max())\n train_indices = all_idx.index.searchsorted(all_idx[all_idx<=t0].index)\n if maxT1Idx < X.shape[0]: \n train_indices=np.concatenate((train_indices,all_idx[maxT1Idx+mbrg:]))\n yield train_indices,test_indices", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def test_split_data():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n train, val, test = pid.split_data(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) 
and (len(val) == 45) and (len(test) == 45) and (len(train[0]) == 3)", "def data_split():\n x_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"images.npy\"))\n y_train = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TRAIN, \"label.npy\"))\n x_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"images.npy\"))\n y_test = np.load(os.path.join(svhn.DEFAULT_PREPROCESSED_DATA_FOLDER, svhn.TEST, \"label.npy\"))\n\n print(x_train.shape)\n print(x_test.shape)\n\n img_rows, img_cols = x_train.shape[1], x_train.shape[2]\n num_classes = 10 # starts with 1 not 0\n\n y_test1 = y_test.reshape((y_test.shape[0],))\n y_test1 = [y - 1 for y in y_test1]\n\n y_train1 = y_train.reshape((y_train.shape[0],))\n y_train1 = [y - 1 for y in y_train1]\n\n input_shape = (img_rows, img_cols, 3)\n\n X_train = x_train.astype('float32')\n X_test = x_test.astype('float32')\n\n X_train /= 255\n X_test /= 255\n print('x_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train2 = keras.utils.to_categorical(y_train1, num_classes)\n y_test2 = keras.utils.to_categorical(y_test1, num_classes)\n\n y_train2 = y_train2.astype('int32')\n y_test2 = y_test2.astype('int32')\n\n print(\n \"after process: X train shape: {}, X test shape: {}, y train shape: {}, y test shape: {}\".format(x_train.shape,\n x_test.shape,\n y_train2.shape,\n y_test2.shape))\n return input_shape, X_train, X_test, y_train2, y_test2", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split_dataset(dataset):\n sequence = dataset.split()\n return sequence", "def split_data(self, model_data, tuning=True):\n pass", "def split_data_set(self, vectors, labels):\n num_of_images = len(vectors)\n num_of_train_data = floor(self.config['percent_to_train_data'] * num_of_images)\n\n indices = np.random.permutation(num_of_images) # A random permutation of all indices\n X_train = [vectors[i] for i in indices[:num_of_train_data]]\n y_train = [labels[i] for i in indices[:num_of_train_data]]\n X_test = [vectors[i] for i in indices[num_of_train_data:]]\n y_test = [labels[i] for i in indices[num_of_train_data:]]\n\n return X_train, y_train, X_test, y_test", "def split_dataset(instances, labels, train_split=0.8):\n split = int(train_split * len(instances))\n train_data, train_labels = instances[:split], labels[:split]\n test_data, test_labels = instances[split:], labels[split:]\n\n return train_data, train_labels, test_data, test_labels", "def split_data(x, y, ratio, seed=1):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Input:\n - x (ndarray) : binary prediction for set 1\n - y (ndarray) : binary prediction for set 2\n - ratio (ndarray) : binary prediction for set 3\n - seed (float) : indices of the data points in set 1 \n Output: \n - train_x (ndarray) : binary prediction for set 1\n - train_y (ndarray) : binary prediction for set 2\n - test_x (ndarray) : binary prediction for set 3\n - test_y (ndarray) : indices of the data points in set 1\n \"\"\"\n # set seed and shuffle the indices\n 
np.random.seed(seed)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n shuffled_y = y[shuffle_indices]\n shuffled_x = x[shuffle_indices]\n \n #splits the set according to the ratio on the shuffled set\n ratio_idx = int(np.floor(ratio*len(y)))\n train_y = shuffled_y[:ratio_idx]\n train_x = shuffled_x[:ratio_idx]\n test_y = shuffled_y[ratio_idx:]\n test_x = shuffled_x[ratio_idx:]\n return train_x, train_y, test_x, test_y", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def split_data(X, scaling, ids, y, split_ratio=0.2):\r\n split = int(X.shape[0] * split_ratio) # index must be int\r\n X_test = X[:split, :, :, :]\r\n scaling_test = scaling[:split, :]\r\n ids_test = ids[:split]\r\n y_test = y[:split, :]\r\n X_train = X[split:, :, :, :]\r\n scaling_train = scaling[split:, :]\r\n ids_train = y[split:]\r\n y_train = y[split:, :]\r\n\r\n return X_train, scaling_train, ids_train, y_train, X_test, scaling_test, ids_test, y_test", "def splitData(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n print(X_train)\n print(y_train)\n print(X_test)\n print(y_test)\n return X_train, X_test, y_train, y_test", "def split(self, X, y=None, group=None):\n\n # Initiate loop variables\n trainset = []\n testset = []\n train_index = 0\n test_index = 0\n tsplit = self.startdate + self.traindur\n\n # Adjust start index to correspond to start date\n while self.dates[train_index] < self.startdate:\n train_index += 1\n\n n_pos = 0\n while tsplit + self.gap + self.testdur < self.enddate:\n # Set test index to correspond to appropriate date\n test_index = train_index\n while self.dates[test_index] < tsplit + self.gap:\n test_index += 1\n\n # Build training set\n while self.dates[train_index] < tsplit:\n trainset.append(train_index)\n train_index += 1\n\n # Build test set\n testset = []\n while self.dates[test_index] < tsplit + self.gap + self.testdur:\n testset.append(test_index)\n test_index += 1\n if y[test_index] == 1:\n n_pos += 1\n\n if self.debug:\n print(str(len(trainset)) + ' ' + str(len(testset)) + ' ' \\\n + str(n_pos) + ' ' + str(self.dates[test_index]))\n n_pos = 0\n\n # Loop update\n tsplit += self.update\n\n yield trainset, testset", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def split_data(images, labels):\n images, labels 
= shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)", "def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n #X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n #y_val = y[val_idxs]\n\n return X_train, X_test, y_train, y_test,", "def dataSplit(self,df):\n X = df['message']\n y = df['label']\n return X, y", "def splitData(X, Y, ndigit):\n trainX = np.zeros((0, X.shape[1]))\n trainY = np.array([])\n testX = np.zeros((0, X.shape[1]))\n testY = np.array([])\n for i in 
xrange(ndigit):\n x = X[Y == i]\n y = Y[Y == i]\n trainX = np.concatenate((trainX, x[0:nSamplesPerClass]))\n trainY = np.concatenate((trainY, y[0:nSamplesPerClass]))\n testX = np.concatenate((testX, x[nSamplesPerClass:]))\n testY = np.concatenate((testY, y[nSamplesPerClass:]))\n return trainX, trainY, testX, testY", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def data_split(X, y):\n folds = KFold(n_splits=SPLITS, shuffle=True, random_state=RANDOM_STATE)\n train_indices, validation_indices = list(folds.split(X))[-1][0], list(folds.split(X))[-1][1]\n\n X_train = X.iloc[train_indices]\n X_validation = X.iloc[validation_indices]\n\n y_train = y.iloc[train_indices]\n y_validation = y.iloc[validation_indices]\n\n return X_train, X_validation, y_train, y_validation", "def split ( self, X: np.ndarray, y: np.ndarray = None ):\n # Split the indices into `number_of_folds` subarray\n indices = self.get_indices ( X )\n split_indices = KFoldCV._get_indices_split ( indices = indices, number_of_folds = self.number_of_folds )\n for number_of_split in range ( self.number_of_folds ):\n # Return all but one split as train, and one split as test\n yield KFoldCV._get_one_split ( split_indices, number_of_split = number_of_split )\n # End split()", "def split(self):\n return self.dataset_split", "def targetFeatureSplit( data ):\n\n target = []\n features = []\n for item in data:\n target.append( item[0] )\n features.append( item[1:] )\n\n return target, features", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], 
dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def glue_inputs(dataset_name, split, batch_size, tokenizer, data_dir=None,\n max_len=128, training=True):\n keys_lookup = {\n \"glue/cola\": (\"sentence\", None),\n \"glue/sst2\": (\"sentence\", None),\n \"glue/mrpc\": (\"sentence1\", \"sentence2\"),\n \"glue/qqp\": (\"question1\", \"question2\"),\n \"glue/stsb\": (\"sentence1\", \"sentence2\"),\n \"glue/mnli\": (\"premise\", \"hypothesis\"), # TODO(kitaev): swap the two?\n \"glue/qnli\": (\"question\", \"sentence\"), # TODO(kitaev) swap the two?\n \"glue/rte\": (\"sentence1\", \"sentence2\"),\n \"glue/wnli\": (\"sentence1\", \"sentence2\"),\n }\n\n key_a, key_b = keys_lookup[dataset_name]\n\n if key_b is None:\n def preprocess(batch):\n \"\"\"Tokenize and convert text to model inputs.\"\"\"\n batch_size = batch[\"idx\"].shape[0]\n input_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n type_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n\n for i in range(batch_size):\n sentence_a = batch[key_a][i]\n tokens_a = tokenizer.EncodeAsIds(sentence_a)\n input_ids[i, :len(tokens_a)] = tokens_a[:max_len]\n\n return {\n \"input_ids\": input_ids,\n \"type_ids\": type_ids,\n \"idx\": batch[\"idx\"].astype(np.int32),\n \"label\": batch[\"label\"],\n }\n else:\n def preprocess(batch):\n \"\"\"Tokenize and convert text to model inputs.\"\"\"\n batch_size = batch[\"idx\"].shape[0]\n input_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n type_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n\n for i in range(batch_size):\n sentence_a = batch[key_a][i]\n sentence_b = batch[key_b][i]\n tokens_a = tokenizer.EncodeAsIds(sentence_a)\n tokens_b = tokenizer.EncodeAsIds(sentence_b)[1:] # Strip start token\n\n ex_input_ids = (tokens_a + tokens_b)[:max_len]\n ex_type_ids = ([0] * len(tokens_a) + [1] * len(tokens_b))[:max_len]\n\n input_ids[i, :len(ex_input_ids)] = ex_input_ids\n type_ids[i, :len(ex_type_ids)] = ex_type_ids\n\n return {\n \"input_ids\": input_ids,\n \"type_ids\": type_ids,\n \"idx\": batch[\"idx\"].astype(np.int32),\n \"label\": batch[\"label\"],\n }\n\n return tfds_stream(\n dataset_name=dataset_name,\n split=split,\n batch_size=batch_size,\n data_dir=data_dir,\n shuffle_files=training,\n shuffle_buffer_size=1024 if training else None,\n batch_shuffle_size=128 if training else None,\n preprocess_fun=preprocess,\n repeat=training,\n )", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def get_transformer_splits(loader_cls, tokenizer, return_intent_labels=True):\n datasets = []\n for subset in SUBSETS:\n dataset = OODDataset(loader_cls(subset=subset), tokenizer.tokenize,\n return_intent_labels)\n dataset.vectorize_texts(tokenizer)\n datasets.append(dataset)\n return datasets", "def split_dataset(data_set, train_size, test_size):\n # Generate random indices 
without replacement, to make train and test sets disjoint\n rand_indices = np.random.choice(data_set.shape[0], train_size+test_size, replace=False)\n feature_end = data_set.shape[1] - 1\n output_location = feature_end\n feature_offset = var.ALGORITHM_INFO['feature_offset']\n\n # Define the training and testing matrices\n x_train = data_set[rand_indices[0:train_size], feature_offset:feature_end]\n y_train = data_set[rand_indices[0:train_size], output_location]\n x_test = data_set[rand_indices[train_size:train_size+test_size], feature_offset:feature_end]\n y_test = data_set[rand_indices[train_size:train_size+test_size], output_location]\n favorite_test = data_set[rand_indices[train_size:train_size+test_size], 0]\n\n # Normalize features, with maximum value in training set\n # as realistically, this would be the only possibility\n\n for ii in range(x_train.shape[1]):\n maxval = np.max(np.abs(x_train[:, ii]))\n if maxval > 0:\n x_train[:, ii] = np.divide(x_train[:, ii], maxval)\n x_test[:, ii] = np.divide(x_test[:, ii], maxval)\n\n\n # Add a column of ones; done after to avoid modifying entire data_set\n x_train = np.hstack((x_train, np.ones((x_train.shape[0], 1))))\n x_test = np.hstack((x_test, np.ones((x_test.shape[0], 1))))\n\n return (x_train, y_train), (x_test, y_test), favorite_test", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def process_data(data, labels):\n\t\n\t# Split the dataset of string into train, validation, and test \n\t# Use a 70/15/15 split\n\t# train_test_split shuffles the data before splitting it \n\t# Stratify keeps the proportion of labels the same in each split\n\n\t# -- WRITE THE SPLITTING CODE HERE --\n\t# Split the data into 70 percent train and 30 percent test and validate data\n\ttrain_X, test_X_split, train_Y, test_Y_split = train_test_split(data, labels, test_size=0.30, stratify=labels,random_state= 1)\n\t# Split the remaining 30 percent data into 15 percent test and validate data each\n\ttest_X, val_X, test_Y, val_Y = train_test_split(test_X_split, test_Y_split, test_size=0.50, stratify=test_Y_split, random_state= 1)\n\n\t# Preprocess each dataset of strings into a dataset of feature vectors\n\t# using the CountVectorizer function. 
\n\t# Note, fit the Vectorizer using the training set only, and then\n\t# transform the validation and test sets.\n\n\t# -- WRITE THE PROCESSING CODE HERE --\n\t# Preprocess dataset using CountVectorizer from ngram range of 1 to 3\n\tvector = CountVectorizer(ngram_range=(1,3))\n\t# Fit data on train dataset\n\ttrain_X = vector.fit_transform(train_X)\n\t# Transform data on test dataset\n\ttest_X = vector.transform(test_X)\n\t# Transform data on validate dataset.\n\tval_X = vector.transform(val_X)\n\t# Return the training, validation, and test set inputs and labels\n\treturn train_X, train_Y, val_X, val_Y, test_X, test_Y\n\t# -- RETURN THE ARRAYS HERE -- ", "def split_dataset(dset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dset = dset.enumerate()\n train_dataset = dset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_inputs(x, vocab_sizes):\n x_split = [x[:, :, 0:2]] # lat-lon feature\n start = 2\n for val in vocab_sizes.values():\n x_split.append(x[:, :, start : (start + val)])\n start += val\n # append mask\n x_split.append(x[:, :, [-1]])\n return x_split", "def _split_sets(X, y, folds, ind=-1, sample_counter=0):\n\n fold = folds.pop(ind) - sample_counter\n X_test = X[fold, ...]\n y_test = y[fold, ...]\n X_train = np.delete(X, fold, axis=0)\n y_train = np.delete(y, fold, axis=0)\n test_fold = fold + sample_counter\n # return X_train, np.squeeze(y_train), X_val, np.squeeze(y_val)\n return X_train, y_train, X_test, y_test, test_fold", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y", "def _prepare_sets(self):\n\n ds_images, ds_labels = self._load_images_labels()\n\n ds_images_2 = ds_images.take(self.val_count)\n ds_labels_2 = ds_labels.take(self.val_count)\n ds_images_1 = ds_images.skip(self.val_count)\n ds_labels_1 = ds_labels.skip(self.val_count)\n\n ds_1 = (ds_images_1, ds_labels_1)\n ds_2 = (ds_images_2, ds_labels_2)\n\n return ds_1, ds_2", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split_input(self):\n namenode = self.runner.namenode\n splitter = Splitter(RECORDS_PER_BLOCK)\n results = []\n input_files = []\n for fname in self.inputs:\n input_files.append(RecordFile(fname, namenode))\n\n taskid = 0\n for block in splitter.split(input_files):\n fname = map_input(self.id, taskid)\n taskid += 1\n namenode.create_file(fname)\n\n bytes_written = 
0\n for record in block:\n bytes_written += namenode.write_file(fname, bytes_written,\n record)\n\n namenode.close_file(fname)\n results.append(fname)\n self.open_files.append(fname)\n\n for file_ in input_files:\n file_.close()\n\n return results", "def dataset_stratified_split(split: float, dataset: np.ndarray, labels: np.ndarray) -> \\\n (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n train_X, test_X, train_Y, test_Y = train_test_split(dataset,\n labels,\n test_size=split,\n stratify=labels,\n random_state=config.RANDOM_SEED,\n shuffle=True)\n return train_X, test_X, train_Y, test_Y", "def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]", "def split_data_into_training_and_validation(self, data):\n training_dataset = 
self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset", "def _split_dataset(self, X, y, label, index, value, sample_weights=None):\n # YOUR CODE HERE\n # Hint: Do not forget to remove the index-th feature from X.\n # begin answer\n ret1=[]\n ret2=[]\n featVec=X[:,index]\n X=X[:,[i for i in range(X.shape[1]) if i!=index ]]\n for i in range(len(featVec)):\n if featVec[i]>=value:\n ret1.append(i)\n else:\n ret2.append(i)\n sub1_X = X[ret1,:]\n sub1_y = y[ret1]\n label_1=label[ret1]\n sub1_sample_weights=sample_weights[ret1]\n sub2_X = X[ret2,:]\n sub2_y = y[ret2]\n label_2=label[ret2]\n sub2_sample_weights=sample_weights[ret2]\n # end answer\n return sub1_X, sub1_y, label_1, sub1_sample_weights, sub2_X, sub2_y, label_2, sub2_sample_weights", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def split ( self, y, X = None ):\n # Make sure y is an array\n y = np.array ( y ) if isinstance ( y, list ) else y\n\n # Groupby y and add integer indices.\n df_with_split = (\n pd.DataFrame ( { \"y\": y, \"index\": np.arange ( len ( y ) ) } )\n .groupby ( \"y\" ) [ \"index\" ]\n .apply ( self.add_split_col ) # Add col for split for instance\n )\n\n # For each fold, get train and test indices (based on col for split)\n for cv_split in np.arange ( self.number_of_folds - 1, -1, -1 ):\n train_bool = df_with_split [ \"split\" ] != cv_split\n test_bool = ~ train_bool\n # Yield index values of not cv_split and cv_split for train, test\n yield df_with_split [ \"index\" ].values [ train_bool.values ], df_with_split [\n \"index\"\n ].values [ test_bool.values ]\n # End split()", "def split_dataset(X: np.array, y: np.array, ratio=0.8):\n '''split dataset to train data and valid data'''\n X_train = X[:int(X.shape[0] * ratio)]\n y_train = y[:int(y.shape[0] * ratio)]\n X_valid = X[int(X.shape[0] * ratio):]\n y_valid = y[int(y.shape[0] * ratio):]\n dataset = tuple([X_train, y_train, X_valid, y_valid])\n\n return dataset", "def _split_data(self):\n\n # Set training data\n self.train_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'train'),\n transform=self._transform()\n )\n self.classes = self.train_data.classes\n\n # Set validation data\n self.val_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'test'),\n transform=self._transform(train=False)\n )", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return 
data_features_train, data_features_test, data_targets_train, data_targets_test", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def data_splitting_main(x_slid, y_slid, idx_slid, output=True):\n args = (x_slid, y_slid, data_split_ratio, idx_slid)\n\n (x_train, y_train, idx_train,\n x_val, y_val, idx_val,\n x_test, y_test, idx_test) = split_data(*args)\n\n if output:\n print('x_train.shape: ', x_train.shape)\n print('y_train.shape: ', y_train.shape)\n print('idx_train.shape: ', idx_train.shape)\n print('x_val.shape: ', x_val.shape)\n print('y_val.shape: ', y_val.shape)\n print('idx_val.shape: ', idx_val.shape)\n print('x_test.shape: ', x_test.shape)\n print('y_test.shape: ', y_test.shape)\n print('idx_test.shape: ', idx_test.shape)\n print()\n\n return (x_train, y_train, idx_train,\n x_val, y_val, idx_val,\n x_test, y_test, idx_test)", "def _create_split(cls, onnx_node, inputs, opset_version):\n axis = onnx_node.getattr(\"axis\", 0)\n split = onnx_node.getattr(\"split\", None)\n num_output = len(onnx_node.outputs)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axis, split, num_output)", "def load_data(self):\n raw_data = np.genfromtxt(self.data_file, delimiter=',')\n self.n_clusters = int(raw_data[-1][-1] + 1)\n self.n_points = len(raw_data) // self.n_clusters\n \n # group data according to label\n data = [raw_data[raw_data[:,-1] == i][:,:-1] \\\n for i in range(self.n_clusters)]\n\n # take only a subset of the data\n if self.split:\n assert 0 <= self.split <= 1, \"Split must be in [0, 1)\"\n\n # update dataset info and print to stdout\n self.n_points = int(self.split * len(data[0]))\n subsampled = self.__len__() - int(self.ood is not None) * self.n_points\n print(f\"INFO: Subsampled {subsampled}/{len(raw_data)} points\")\n \n return [cluster[:self.n_points] for cluster in data]\n return data", "def split_dataset(x, y, seed=0):\n # split the data into label and unlabel\n x_unlabel, x_label, _, y_label = \\\n train_test_split(\n x,\n y,\n test_size=0.1,\n random_state=seed,\n )\n\n # split data into train and test data\n x_train, x_test, y_train, y_test = \\\n train_test_split(\n x_label,\n y_label,\n test_size=0.2,\n random_state=seed,\n )\n\n return Dataset(\n x_unlabel,\n Data(x_train, None, y_train),\n Data(x_test, None, y_test)\n )", "def split_data(X_data, y_data):\n return cv.train_test_split(X_data, y_data, test_size=0.1, random_state=0)", "def split_dev_data(input_data: List[Tuple[str, int]]) -> Tuple[List[Tuple[str, int]],\n List[Tuple[str, int]],\n List[Tuple[str, int]]]:\n training_data, test_data = split_data(input_data)\n\n # split test data in half to test on\n dev_data = set(random.sample(test_data, int(len(test_data) / 2)))\n test_data = set(test_data) - set(dev_data)\n\n return list(training_data), list(test_data), list(test_data)", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, 
test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def _split_sample(sample):\n\n inputs, targets = sample\n return inputs, targets", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def train_val_test_split(data):\n raise NotImplementedError", "def train_test_split(measurements: np.ndarray, split: float = 0.8) -> (np.ndarray, np.ndarray):\n labels_measurements = [m.label for m in measurements]\n labels = np.unique(labels_measurements)\n\n for i, l in enumerate(labels):\n indices_label = np.argwhere(np.array(labels_measurements) == l).flatten()\n\n num_samples = indices_label.size\n if i == 0:\n measurements_train = measurements[indices_label][:int(split*num_samples)]\n measurements_test = measurements[indices_label][int(split*num_samples):]\n else:\n measurements_train = np.append(measurements_train, measurements[indices_label][:int(split*num_samples)])\n measurements_test = np.append(measurements_test, measurements[indices_label][int(split*num_samples):])\n\n np.random.shuffle(measurements_train)\n np.random.shuffle(measurements_test)\n\n return measurements_train, measurements_test", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def features_and_labels(self):\n if self.is_dataset:\n if self._iterator is None:\n raise RuntimeError('Internal error: Must call dataset_initializer_hook '\n 'before calling features_and_labels(). 
Please file '\n 'a bug!')\n return _Inputs._parse_inputs(self._iterator.get_next())\n\n return (self._features, self._labels)", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def cut_all_data_and_labels_on_chunks(data: Data_dict_type, labels: Labels_dict_type,\n window_size: float, window_step: float) -> Tuple[\n Data_dict_type, Labels_dict_type]:\n for key, item in data.items():\n # extract data and sample rate of videofile\n data_array, sample_rate = item\n # calculate size of window in units (indexes)\n window_size_in_units = int(np.round(window_size * sample_rate))\n window_step_in_units = int(np.round(window_step * sample_rate))\n try:\n # try to cut data on chunks with defined window\n data_array = cut_data_on_chunks(data_array, window_size_in_units, window_step_in_units)\n data_array = np.concatenate([x[np.newaxis, ...] for x in data_array], axis=0)\n except AttributeError:\n # if size of window or step of window are more than length of data, takes full data as one window.\n data_array = data_array[np.newaxis, ...]\n data[key] = (data_array, sample_rate)\n # labels cutting, everything the same as with data cutting\n labels_dataframe = labels[key]\n try:\n labels_dataframe = cut_data_on_chunks(labels_dataframe.values, window_size_in_units, window_step_in_units)\n labels_dataframe = np.concatenate([x[np.newaxis, ...] 
for x in labels_dataframe], axis=0)\n except AttributeError:\n # labels now will be saved in np.ndarray format\n labels_dataframe = labels_dataframe.values[np.newaxis, ...]\n labels[key] = labels_dataframe\n return data, labels", "def getProcessedData(self, data, labels):\n if self.underSamplePercentage != 0:\n data, labels = self.underSample(data, labels)\n if self.beta != 0: \n synData, synLabels = self.adaSynAdd(data, labels)\n if synData is not None:\n data, labels = combineTestSets(data, labels, synData, synLabels)\n return data, labels", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)" ]
[ "0.68070143", "0.6750992", "0.6724709", "0.6647016", "0.66121227", "0.65744203", "0.65072405", "0.64910185", "0.6469287", "0.6430547", "0.6300627", "0.62866193", "0.62826365", "0.62524253", "0.6250792", "0.6244052", "0.6212815", "0.6197", "0.6156624", "0.6151179", "0.61149824", "0.6108953", "0.61070466", "0.6095734", "0.6080367", "0.6073011", "0.60523045", "0.60523045", "0.6045247", "0.60369086", "0.6031695", "0.6026581", "0.6017836", "0.60094786", "0.6004849", "0.5989613", "0.59734565", "0.59665537", "0.59567875", "0.59547293", "0.59509647", "0.5945355", "0.59425336", "0.59402686", "0.5920461", "0.5919605", "0.59086853", "0.58946294", "0.58894426", "0.58651066", "0.5860725", "0.5859986", "0.5833189", "0.5831716", "0.5829594", "0.58295274", "0.5823907", "0.5821638", "0.5816679", "0.5814993", "0.58046556", "0.580331", "0.5802962", "0.5788603", "0.5784532", "0.578326", "0.5779767", "0.577286", "0.5768963", "0.5768035", "0.5756353", "0.57558554", "0.57485616", "0.57417625", "0.5741353", "0.57388717", "0.57333803", "0.57333803", "0.5732169", "0.5730424", "0.5727696", "0.5726752", "0.5726598", "0.57230717", "0.57228017", "0.57192516", "0.57139933", "0.5708648", "0.5692393", "0.5688323", "0.5681851", "0.56730384", "0.56697357", "0.566779", "0.566779", "0.56650734", "0.5662327", "0.56573325", "0.56552094", "0.56542957" ]
0.6575571
5
Method to classify a test data set and return the score in terms of accuracy.
def score(self, test_data): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, E, e_rate, se, interval)", "def score(self, X, y):\n predictions = self.predict(X)\n total_values = len(y)\n accuracy = 0\n if 'classification' == self.label_type:\n correct_values = np.where(predictions == y)\n accuracy = correct_values[0].size / total_values\n elif 'regression' == self.label_type:\n sse = (y - predictions) ** 2\n sse_summed = np.sum(sse)\n accuracy = sse_summed / total_values\n\n return accuracy", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for vector in vectors:\n # Get the prediction\n prediction = self.classifier(vector)\n\n # Only count definitive predictions\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n 
cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = correct[cls] / cls_count[cls]\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = correct[None] / cls_count[None]\n\n # Return\n return accuracy", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def evaluate_prediction(classifier, test_data, labels):\n \n predictions = classifier.predict(test_data)\n \n return accuracy_score(labels, predictions)", "def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def accuracy_score(preds, y):\n accuracy = sum([1 for i in range (len(preds)) if preds[i] == y[i]])*1.0/len(preds) \n return accuracy", "def multiclass_accuracy(prediction, ground_truth):\n correct = sum(a == b for a, b in zip(prediction, ground_truth))\n\n accuracy = correct / len(ground_truth)\n\n return accuracy", "def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count", "def test(self):\n\t\treturn classification_report(self.test_labels, self.predict(self.test_data), target_names=self.le.classes_)", "def evaluate_classifier(self, clf):\n\n clf = clf.fit(self.training_data_train_x, self.training_data_train_y)\n predicted = clf.predict(self.training_data_opt_x)\n\n correct = 0\n for i in range(len(self.training_data_opt_y)):\n if predicted[i] == self.training_data_opt_y[i]:\n correct += 1\n\n accuracy = correct / len(self.training_data_opt_y)\n\n return clf, accuracy", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def test(self):\n 
self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def classification_score(self, x, y):\t\n\t\tpass", "def classify_data(self, test_set, include_features_in_result=False):\n if len(test_set) == 1:\n return self.__classify(test_set, self.__tree)\n else:\n\n indices = test_set.index.values.tolist()\n correct_classified_rows = 0\n\n classification_result = []\n\n for index in indices:\n\n training_row = pd.DataFrame(test_set.loc[index])\n training_row = training_row.T\n\n result_row = [list(x) for x in training_row.values][0]\n expected_value = str(training_row[self.__resulting_feature].iloc[0])\n classified_value = self.classify_data(training_row)\n result_row.append(classified_value)\n result_row = tuple(result_row)\n\n classification_result.append(result_row)\n\n if expected_value == classified_value:\n correct_classified_rows += 1\n\n self.accuracy_of_previous_test = (correct_classified_rows / len(test_set) * 100)\n\n column_names = list(test_set)\n column_names.append(\"classified\")\n classification_result = pd.DataFrame(classification_result, columns=column_names)\n\n if include_features_in_result:\n return classification_result\n else:\n return classification_result.iloc[:, -2:]", "def categorical_accuracy(preds, y):\n top_pred = preds.argmax(1, keepdim = True)\n correct = top_pred.eq(y.view_as(top_pred)).sum()\n acc = correct.float() / y.shape[0]\n return acc", "def test(self, dataset): \n predictions = np.zeros(len(dataset), int)\n \n accuracy = self.random_forest.score(dataset[:,:-1], dataset[:,-1]) # Predict and compute accuracy.\n predictions = self.predict(dataset[:,:-1]) # Predict and return list of predictions.\n \n return predictions, accuracy", "def findOveralAccuracy(trainData,testData):\r\n kNNClassifier = kNN(trainData)\r\n \r\n All_Predictions = kNNClassifier.classify(testData,k=5)\r\n \r\n reference_dictionary = testData.dataDict['Species']\r\n\r\n Overall_Accuracy = 100*sum(reference_dictionary== All_Predictions)/len(All_Predictions)\r\n \r\n return All_Predictions, Overall_Accuracy", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs", "def testClassifier(x_train, y_train, x_test, y_test, clf):\n #metrics = []\n start = dt.now()\n clf.fit(x_train, y_train)\n end = dt.now()\n print 'training time: ', (end - 
start)\n \n # add training time to metrics\n #metrics.append(end-start)\n \n start = dt.now()\n yhat = clf.predict(x_test)\n end = dt.now()\n print 'testing time: ', (end - start)\n \n # add testing time to metrics\n #metrics.append(end-start)\n \n print 'classification report: '\n# print classification_report(y_test, yhat)\n pp(classification_report(y_test, yhat))\n \n print 'f1 score'\n print f1_score(y_test, yhat, average='macro')\n \n print 'accuracy score'\n accuracy = accuracy_score(y_test, yhat)\n print accuracy\n #metrics.append(accuracy)\n #precision = precision_score(y_test, yhat, average=None)\n #recall = recall_score(y_test, yhat, average=None)\n \n # add precision and recall values to metrics\n #for p, r in zip(precision, recall):\n # metrics.append(p)\n # metrics.append(r)\n \n \n #add macro-averaged F1 score to metrics\n #metrics.append(f1_score(y_test, yhat, average='macro'))\n \n print 'confusion matrix:'\n print confusion_matrix(y_test, yhat)\n \n # plot the confusion matrix\n plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')\n plt.show()\n \n return accuracy", "def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass", "def get_prediction(self, data, class_label):\n\t\taccuracy = 0\n\t\thit=0\n\t\tcount=0\n\t\tfor index, row in test.iterrows():\n\t\t\tcount += 1\n\t\t\ttmp = self.get_classLabel(row.tolist(),row[class_label])\n\t\t\t#print (tmp)\n\t\t\tif tmp:\n\t\t\t\thit+=1\n\t\t#print (\"hit \"+ str(hit) )\n\t\taccuracy = hit/count\n\t\t\n\t\treturn accuracy", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def score(self, x, y, **kwargs):\n y = 
np.searchsorted(self.classes_, y)\n kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n\n loss_name = self.model.loss\n if hasattr(loss_name, '__name__'):\n loss_name = loss_name.__name__\n if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n y = to_categorical(y)\n\n outputs = self.model.evaluate(x, y, **kwargs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n for name, output in zip(self.model.metrics_names, outputs):\n if name == 'acc':\n return output\n raise ValueError('The model is not configured to compute accuracy. '\n 'You should pass `metrics=[\"accuracy\"]` to '\n 'the `model.compile()` method.')", "def score(self, X, Y):\n # calls predict on X and predicts labels, and compares it with true labels to return accuracy\n y_hat=self.predict(X)\n Y=np.argmax(Y,axis=1)\n atrain_costuracy=(y_hat==Y).mean()\n return atrain_costuracy", "def reportAccuracy(self, testLabels=\"\"):\n assert len(self._predictions) > 0\n rawTestLabelDump = self._read_file(testLabels)\n formattedTestLabels = [line for line in rawTestLabelDump.split('\\n')]\n corrects = [1 for x in zip(self._predictions, formattedTestLabels) if x[0] == x[1]]\n return (len(corrects) / len(self._predictions)) * 100", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n\n ### create classifier\n clf = GaussianNB()#TODO\n clf.fit(features_train,labels_train)\n ### fit the classifier on the training features and labels\n #TODO\n\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)#TODO\n\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example, \n ### where we just print the accuracy\n ### you might need to import an sklearn module\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred,labels_test)#TODO\n return accuracy", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def compute_accuracy(self, X_data, y_data):\n assert isinstance(X_data, np.ndarray)\n assert isinstance(y_data, np.ndarray)\n assert X_data.shape[0] == y_data.shape[0]\n \n correct = 0\n for i in range(len(X_data)):\n outputs = self.predict(X_data[i])\n outputs = outputs > 0.5\n if outputs == y_data[i]:\n correct += 1\n acc = float(correct) / len(X_data)\n return acc", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 
9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n from sklearn.metrics import accuracy_score\n\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n # method 1\n accuracy = clf.score(features_test, labels_test)\n \n # method 2\n pred = clf.predict(features_test)\n accuracy = accuracy_score(pred, labels_test)\n \n return accuracy", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def evaluate(self, training_scores, original_test_scores, imitation_test_scores):\n\n #finding a threshold: third to smallest training score\n sorted_scores = np.sort(training_scores)\n threshold = sorted_scores[2]\n\n #computing the number of errors\n errors = len(np.where(original_test_scores < threshold)[0])\n errors += len(np.where(imitation_test_scores > threshold)[0])\n\n #computing the local accuracy\n accuracy = 1 - errors/(len(original_test_scores)+len(imitation_test_scores))\n return accuracy, threshold", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def ComputeAccuracy(test_set, test_set_predicted):\n total_tokens = 0\n error_counts = 0\n error_counts_line = 0\n\n # This loop runs for no. of sentences in \"test_set\"\n for idx in xrange(len(test_set)):\n # if sentences doesn't match exactly, count errors for line (error_counts_line) and tokens (error_counts)\n if test_set[idx] != test_set_predicted[idx]:\n error_counts_line += 1\n # This loop runs for no. 
of tuples (word, pos) in sentence\n for jdx in xrange(len(test_set[idx])):\n total_tokens += 1\n if test_set[idx][jdx] != test_set_predicted[idx][jdx]:\n error_counts += 1\n else:\n total_tokens += len(test_set[idx])\n\n sentence_accuracy = 100 - float(error_counts_line) * 100 / len(test_set)\n tagging_accuracy = 100 - float(error_counts) * 100 / (total_tokens - 2 * len(test_set)) # excluding sentence boundary tokens\n\n print \"Sentence Accuracy: %.2f%%\" % sentence_accuracy\n print \"Tagging Accuracy: %.2f%%\" % tagging_accuracy", "def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def accuracy(clf, x, y, cv=5):\n print_classification_info(clf, x, y)\n return cross_val_score(clf, x, y, cv=cv).mean() * 100", "def classify(self):\n\n if self.classifier is None:\n raise ValueError('self.classifier is None')\n if self.df is None:\n raise ValueError('self.df is None')\n if self.features is None:\n raise ValueError('self.features is None')\n\n train_set = self.df[self.df[self.label_col] != CLASSIFIER_NAN]\n test_set = self.df[self.df[self.label_col] == CLASSIFIER_NAN]\n\n test_set_timestamps = list(test_set.index.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n self.classifier.fit(\n train_set[self.features],\n train_set[self.label_col]\n )\n\n preds = self.classifier.predict(test_set[self.features])\n probs = self.classifier.predict_proba(test_set[self.features])\n\n res = []\n\n for i in range(0, len(preds)):\n probability = max(probs[i])\n res.append([test_set_timestamps[i], preds[i], probability])\n\n return res", "def categorical_accuracy(preds, y):\n max_preds = preds.argmax(dim=1, keepdim=True)\n correct = max_preds.squeeze(1).eq(y)\n\n return correct.sum() / torch.FloatTensor([y.shape[0]])", "def accuracy_score(y_test, predictions):\n # my implementation\n # correct_prediction = y_test == predictions\n # accuracy_score = (predictions[correct_prediction].shape[0] /\n # (predictions.shape[0] * 1.0))\n\n # better implementation\n if len(y_test) != len(predictions):\n raise ValueError('y_test and predictions are in different shape')\n\n return (y_test == predictions).mean()", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\t\treturn self.model.score(ins, outs)", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def eval_performance(weights, test_y, test_x):\n y_predicted = predict_labels(weights, test_x)\n accuracy = 
len(y_predicted[y_predicted == test_y]) / len(y_predicted)\n return accuracy", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def accuracy1(y_test, predictions):\n accuracy = 0.0\n\n for i in range(y_test.shape[0]):\n intersection = 0.0\n union = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) == 1 or int(predictions[i,j]) == 1:\n union += 1\n if int(y_test[i,j]) == 1 and int(predictions[i,j]) == 1:\n intersection += 1\n \n if union != 0:\n accuracy = accuracy + float(intersection/union)\n\n accuracy = float(accuracy/y_test.shape[0])\n\n return accuracy", "def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def test(self, test_X, test_y):\n if self.feat_sel:\n test_X = self.skb.transform(test_X)\n predicted = self.clf.predict_proba(test_X)[:, 1]\n return roc_auc_score(test_y, predicted), average_precision_score(test_y, predicted)", "def get_accuracy(pred, test_label, regression= \"logistic\"):\n if regression == \"multiclass\":\n pred_max = np.argmax(pred, axis=1)\n gt_max = np.argmax(test_label, axis=1)\n acc = np.sum(pred_max == gt_max)*100.0/pred.shape[0]\n elif regression == \"logistic\" or regression == \"probit\":\n if pred.ndim == 2:\n pred = pred[:,0]\n pred[pred >= 0.5] = 1.0\n pred[pred < 0.5] = 0.0\n acc = np.sum(pred == test_label)*100.0/pred.shape[0]\n\n return acc", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def evaluate_preds_classification(y_true, y_preds):\n accuracy = accuracy_score(y_true, y_preds)\n precision = precision_score(y_true, y_preds)\n recall = recall_score(y_true, y_preds)\n f1 = f1_score(y_true, y_preds)\n metric_dict = {\"accuracy\": round(accuracy, 2),\n \"precision\": round(precision, 2),\n \"recall\": round(recall, 2),\n \"f1\": round(f1, 2)}\n print(f\"Accuracy: {accuracy * 100:.2f}%\")\n print(f\"Precision: {precision}\")\n print(f\"Recall: {recall}\")\n print(f\"F1 Score: {f1} \\n\")\n return metric_dict", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for SVM\n from sklearn.svm import SVC\n\n ### create classifier specifying the kernel\n clf = SVC(kernel=\"rbf\", C = 10000)\n\n ### these lines effectively slice the training dataset down \n ### to 1% of its original size, tossing out 99% of the training data.\n #features_train = features_train[:len(features_train)/100] \n #labels_train = labels_train[:len(labels_train)/100]\n\n ### Calculate the Time spent to train our algorithm\n t0 = time()\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n print \"Training time:\", round(time()-t0, 3), \"s\"\n\n ### Calculate the Time spent in the prediction\n t0 = 
time()\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)\n\n print \"Prediction time:\", round(time()-t0, 3), \"s\"\n\n print \"Prediction for element #10:\", pred[10]\n print \"Prediction for element #26:\", pred[26]\n print \"Prediction for element #50:\", pred[50]\n print \"We could predict \", (sum(i == 1 for i in pred)),\"in \", len(features_test),\"test events bilong to Chris\"\n\n ### calculate and return the accuracy on the test data\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred, labels_test)\n \n ### Another way\n ### accuracy = clf.score(features_test, labels_test)\n return accuracy", "def test(self, test_instances, test_labels):\n scores = self.classifier.predict(test_instances)\n # TODO: print report", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def evaluate(test_set, predictions):\n full_report = metrics.classification_report(test_set.labels, predictions,\n labels=range(len(test_set.index2label)),\n target_names=test_set.index2label, digits=3)\n pre, rec, f1, support = metrics.precision_recall_fscore_support(test_set.labels, predictions, average='weighted')\n return pre, rec, f1, support, full_report", "def accuracy(self, data, class_loss_scores=False):\n predicts = [self.feedforward(xy[0].T) for xy in data]\n refs = [xy[1].T for xy in data]\n if class_loss_scores:\n # TODO NOTE that class loss function indicate an error not positive label,\n # loss function inverse scores\n predicts = [1.0 - self.cost.class_loss_scores(s) for s in predicts]\n # threshold network score\n pred_labs = [cf.step(x, self.threshold) for x in predicts]\n return metrics.micro_f1(refs, pred_labs, False)", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def accuracy(predicted, ground_truth):\n predicted_labels_decoded = np.argmax(predicted, axis=1)\n ground_truth_labels_decoded = np.argmax(ground_truth, axis=1)\n correct_rate = [1 if pred == truth else 0 for (pred, truth) in\n zip(predicted_labels_decoded, ground_truth_labels_decoded)]\n accuracy = sum(correct_rate) / ground_truth_labels_decoded.size\n return accuracy * 100", "def evaluate(self, test_dir='data/dev', target='real'):\n test_data = {c: os.path.join(test_dir, c) for c in self.classes}\n if not target in test_data:\n print('Error: target class does not exist in test data.')\n return\n outcomes = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}\n # >>> YOUR ANSWER HERE\n data = []\n for c in test_data:\n docs = open(test_data[c]).readlines()\n for doc in docs:\n preprocess_doc = doc.strip()\n data.append((c, preprocess_doc))\n for item in data:\n predict_ans = self.predict(item[1])\n if item[0] == 'real':\n if predict_ans == 'real':\n outcomes['TP'] += 1\n else:\n outcomes['FN'] += 1\n else:\n if predict_ans == 'real':\n outcomes['FP'] += 1\n else:\n outcomes['TN'] += 1\n precision = outcomes['TP'] / (outcomes['TP'] + outcomes['FP']) # replace with equation for precision\n recall = outcomes['TP'] / (outcomes['TP'] + outcomes['FN']) # replace with equation for recall\n 
f1_score = 2 * ((precision * recall) / (precision + recall)) # replace with equation for f1\n # >>> END YOUR ANSWER\n return precision, recall, f1_score", "def accuracy(self, x, t):\n\n y = self.predict(x)\n y_labels = np.argmax(y, axis=1)\n t_labels = np.argmax(t, axis=1)\n\n num_all = x.shape[0]\n num_hit = np.sum(y_labels == t_labels)\n return num_hit / float(num_all)", "def train_and_test_model(self, X_train, y_train, X_test, y_test):\n\n\t\t# Fit the classification model on the whole training set (as opposed to cross-validation)\n\t\t# print(\"Y TRAIN: \", y_train[:10])\n\t\t# print(\"x TRAIN: \", X_train[:10])\n\t\tself.classifier.fit(X_train, y_train)\n\t\ty_train_predicted = self.classifier.predict(X_train)\n\t\tprint(\"np.mean Accuracy TRAINING: %s\" % np.mean(y_train_predicted == y_train))\n\n\t\t''' Predict the outcome on the test set\n\t\t\tNote that the clf classifier has already been fit on the training data.\n\t\t'''\n\t\ty_predicted = self.classifier.predict(X_test)\n\n\t\tprint(\"%.2f seconds: Finished training the model and predicting class labels for the test set\" % time.process_time())\n\n\t\t# Simple evaluation using numpy.mean\n\t\t# print(\"np.mean Accuracy: %s\" % np.mean(y_predicted == y_test))\n\n\t\t# Log the classification report\n\t\t# print(\"Classification report:\\n%s\" % metrics.classification_report(y_test, y_predicted))\n\n\t\t# The confusion matrix\n\t\t# confusion_matrix = metrics.confusion_matrix(y_test, y_predicted)\n\t\t# print(\"Confusion matrix:\\n%s\" % confusion_matrix)", "def categorical_accuracy(preds, y):\n max_preds = preds.argmax(\n dim=1, keepdim=True\n ) # get the index of the max probability\n correct = max_preds.squeeze(1).eq(y)\n return correct.sum() / torch.FloatTensor([y.shape[0]]).to(device)", "def classify(self, X, y):\n\n clf = svm.SVC(kernel='linear', C=1)\n cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n scores = cross_val_score(clf, X, y, cv=cv, scoring='balanced_accuracy')\n\n return scores", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def classify(self, nn=1):\n\t\t#err=0\n\t\tpossibilities=[]\n\t\tfor i in range(len(self.X_test)):\n\t\t\tfor lines in range(len((self.X_train))):\n\t\t\t\tdist=np.linalg.norm(self.X_test[i]-self.X_train[lines])\n\t\t\t\tpossibilities.append([dist,self.Y_train[lines]])\n\t\t\tpossibilities.sort()\n\t\t\tfinal=[]\n\t\t\tfor c in range(0,15):\n\t\t\t\tfinal.append(possibilities[c][1])\n\t\t\t\tprint possibilities[c][1]\n\t\t\tcount=np.zeros(10)\n\t\t\tfor m in final:\n\t\t\t\tcount[m]+=1\n\t\t\t\n\t\t\tans=np.any(count==count.max())\n\t\t\t\n\t\t\tprint \"actual=\",self.Y_test[i]\n\t\t\tif(ans!=self.Y_test[i]):\n\t\t\t\tglobal err\n\t\t\t\terr=err+1", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test set.\n return list(map(self.classify, test))", "def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)", "def get_test_accuracy(model, X_test, y_test):\n # Make predictions - test accuracy\n test_pred = model.predict(X_test)\n score = accuracy_score(test_pred, y_test)\n print(\"Test Accuracy:\", score)\n\n return test_pred", "def test_accuracy(self, _input_data, _labels, quiet=False):\n test_loss, test_accuracy = (self.merged_model).evaluate(\n _input_data, 
_labels, verbose=0\n )\n\n return test_accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def nb_accuracy(features_train, labels_train, features_test, labels_test):\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n predictions = clf.predict(features_test)\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example,\n ### where we just print the accuracy\n ### you might need to import an sklearn module\n\n # accuracy = no of test points that are classified correctly /\n # total no of points (in a test set)\n\n # method#1: write code that compares predictions to y_axis_test, element-by-element\n # method#2: google \"sklearn accuracy\" and go from there\n # method#3: There's another way you can do this, too\n # print clf.score(features_test, labels_test)\n #accuracy = clf.score(features_test, labels_test)\n accuracy = accuracy_score(predictions, labels_test)\n return accuracy", "def accuracy(pred, target):\n N = pred.shape[0]\n return (pred == target).sum() * 1.0 / N", "def accuracy(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n return (y_true == y_pred).mean()", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in 
fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(yHat, yTrue):\n # TODO calculate the accuracy\n acc = 0\n for i in range(len(yHat)): # count the number of correct classifications\n if yHat[i] == yTrue[i]:\n acc += 1\n return acc / len(yTrue) # return the num correct / total test samples" ]
[ "0.7827106", "0.7716559", "0.76527333", "0.7484252", "0.7440777", "0.74163395", "0.73581105", "0.73344773", "0.7304312", "0.7296065", "0.7292504", "0.7276412", "0.7261127", "0.7254433", "0.72333145", "0.7222515", "0.7206312", "0.7166098", "0.7164757", "0.71625596", "0.71616316", "0.71616316", "0.71565634", "0.71556395", "0.71511847", "0.7128608", "0.71202165", "0.70871365", "0.707933", "0.7077829", "0.7077484", "0.7072452", "0.70676696", "0.7061218", "0.7037335", "0.7028106", "0.7001005", "0.6992519", "0.6977518", "0.6971626", "0.69690007", "0.69606775", "0.6960382", "0.6948792", "0.69485444", "0.6938345", "0.693832", "0.6930434", "0.69216955", "0.6920842", "0.69103134", "0.68892676", "0.68872637", "0.6886996", "0.6886718", "0.68687224", "0.68641686", "0.6861558", "0.6858754", "0.68566567", "0.6852216", "0.6851073", "0.68415004", "0.6832126", "0.68189466", "0.68186796", "0.6816093", "0.68133634", "0.68015903", "0.6798393", "0.6785856", "0.67840236", "0.6769236", "0.676578", "0.67629355", "0.67595", "0.6758879", "0.6746077", "0.6736455", "0.6735567", "0.6726786", "0.672439", "0.6721811", "0.6719022", "0.67103183", "0.6689049", "0.66880363", "0.6686092", "0.6679872", "0.6679872", "0.6678789", "0.667702", "0.6673856", "0.6672861", "0.66696334", "0.6669122", "0.66673124", "0.66630256", "0.6661059", "0.6657598", "0.6657271" ]
0.0
-1
Fit the knn model.
def _fit(self, data): train_in, train_labels = self._split_inputs_outputs(data) clf = KNeighborsClassifier(n_neighbors=self.k) clf.fit(train_in, train_labels) return clf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self):\n \n self.knn_model = KNeighborsRegressor(n_neighbors=self.k,\n weights='distance')\n self.knn_model.fit(self.X, self.y)", "def fit(self, Xtrain, ytrain):\n self.Xtrain = Xtrain\n self.ytrain = ytrain\n\n max_nn = len(Xtrain)\n # initiate the class and fit the data\n self.knn = neighbors.KNeighborsRegressor(max_nn, weights=self.weights)\n self.knn.fit(Xtrain,ytrain)", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def fit(self, trainingFeatures, trainingTargets):\r\n\r\n \"\"\" Implement kNN learning below. \"\"\"\r\n\r\n self._fitCalled = True\r\n self.labels = trainingTargets\r\n self.data = trainingFeatures", "def fit(self, X, y, **kwargs):\n\n\t\tif kwargs:\n\t\t\tself.view1_columns_ = kwargs['view1_columns']\n\t\t\tself.view2_columns_ = kwargs['view2_columns']\n\t\t\tself.knn_view1_.n_neighbors = kwargs['n_neighbors']\n\t\t\tself.knn_view2_.n_neighbors = kwargs['n_neighbors']\n\t\t\tself.knn_view3_.n_neighbors = kwargs['n_neighbors']\n\n\t\tX_view1 = X[:, self.view1_columns_]\n\t\tX_view2 = X[:, self.view2_columns_]\n\n\t\t# Train the KNN classifiers.\n\t\tself.knn_view1_.fit(X_view1, y)\n\t\tself.knn_view2_.fit(X_view2, y)\n\t\tself.knn_view3_.fit(X, y)\n\n\t\t# Train the Bayesian classifiers.\n\t\tself.gauss_view1_.fit(X_view1, y)\n\t\tself.gauss_view2_.fit(X_view2, y)\n\t\tself.gauss_view3_.fit(X, y)\n\n\t\t# Calculate the probabilities.\n\t\t_, classes_count = np.unique(y, return_counts=True)\n\t\tself.P_w = classes_count/np.sum(classes_count)\n\n\t\treturn self", "def fit_full(self, K):\r\n pass", "def fit(self,X_train,y_train):\n assert X_train.shape[0] == y_train.shape[0], \\\n \"the size of X_train must equal to the size of y_train\"\n assert self.k <= X_train.shape[0],\\\n \"the size of X_train must be at least k\"\n self._X_train =X_train\n self._y_train =y_train\n\n return self", "def fit(self, kk=None):\n self.fft_fit.fit(kk)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def fit_and_predict_KNN(X_train, Y_train, X_test, K):\n \n # Import the package\n from sklearn.neighbors import KNeighborsClassifier\n\n ### YOUR SOLUTION STARTS HERE###\n #referenced to sklearn documentation\n # fit the model (for KNN this is just storing the training data and labels) \n clf = KNeighborsClassifier(n_neighbors=K).fit(X_train, Y_train)\n # Predict\n predicted_KNN = clf.predict(X_test)\n return predicted_KNN", "def fit(self):\n \n print(\"Training model...\")\n center = self.center\n model = self.model\n n_epochs = self.config.epochs\n best_val_loss = np.inf\n for epoch in range(n_epochs):\n model.train()\n loop = tqdm(self.train_dataloader)\n for xb, _ in loop:\n loss = self.loss_batch(xb)\n loop.set_description(\"Epoch [{}/{}] \".format(epoch, n_epochs))\n loop.set_postfix({\"loss\":loss.item()})\n\n model.eval()\n with torch.no_grad():\n losses = [torch.cdist(model(xb), center.view(1, -1))\n for xb, yb in self.val_dataloader]\n losses = [x.item() for xb in losses for x in xb]\n val_loss = np.mean(losses) + self.get_regularizer_term()\n print(\"val_loss={:.6f}\".format(val_loss))\n\n if val_loss < best_val_loss:\n best_model_state = copy.deepcopy(model.state_dict())\n best_val_loss = val_loss\n self.save_model(self.config.mnist_cnn_weights, best_model_state)", "def _fit(self, X, y):\n # store full data as indexed X\n self._X = X\n\n if self.pass_train_distances:\n dist_mat = self._distance(X)\n else:\n # if we do not want/need to pass train-train 
distances,\n # we still need to pass a zeros matrix, this means \"do not consider\"\n # citing the sklearn KNeighborsClassifier docs on distance matrix input:\n # \"X may be a sparse graph, in which case only \"nonzero\" elements\n # may be considered neighbors.\"\n X_inner_mtype = self.get_tag(\"X_inner_mtype\")\n _, _, X_meta = check_is_mtype(X, X_inner_mtype, return_metadata=True)\n n = X_meta[\"n_instances\"]\n dist_mat = np.zeros([n, n], dtype=\"float\")\n\n self.knn_estimator_.fit(dist_mat, y)\n\n return self", "def fit():\n pass", "def model(self):\n\n logger.info('[*] Starting processing of dataset ...')\n \n cl = classify.Classify(logger)\n data = cl.get_dataset()\n\n logger.info('[*] Using K-nearest neighbour algorithm ...')\n \n self.knn_model = KNeighborsClassifier(n_neighbors = self.knn)\n self.train_and_test(data)\n\n return True", "def fit(self):\n accuracy = 0\n no_improvement = 0\n epochs = trange(self.args.epochs, desc=\"Accuracy\")\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n self.model.train()\n for epoch in epochs:\n self.optimizer.zero_grad()\n prediction = self.model(self.propagation_matrix, self.features)\n loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])\n loss = loss + self.args.lambd*torch.sum(self.model.page_rank_convolution_1.weight_matrix**2)\n loss.backward()\n self.optimizer.step()\n new_accuracy = self.score(self.validation_nodes)\n epochs.set_description(\"Validation Accuracy: %g\" % round(new_accuracy,4))\n if new_accuracy < accuracy:\n no_improvement = no_improvement + 1\n if no_improvement == self.args.early_stopping:\n epochs.close()\n break\n else:\n no_improvement = 0\n accuracy = new_accuracy \n acc = self.score(self.test_nodes)\n print(\"\\nTest accuracy: \" + str(round(acc,4)) )", "def fit(self, X):", "def train(self, trainX, trainY):\n self.model = KNeighborsRegressor(n_neighbors=5)\n self.model.fit(trainX, trainY)", "def create_fit_model(features_df):\n model = NearestNeighbors(n_neighbors=10,\n n_jobs=-1)\n knn_spotify = model.fit(features_df)\n return knn_spotify", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def fit(self, gamma, K):\n self.K = K\n self.gamma = gamma\n self.cluster()\n self.generate_phi()\n self.LRC()", "def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self", "def fit(self, X_train, y_train):\n\n shuffle_dataset(X_train, y_train)\n X = X_train\n\n\n # Add label 0 to features\n # X = concatenate_csr_matrices_by_columns(X_train, y_train[:, 0])\n\n self.label_dim = y_train.shape[1]\n\n _init = False\n # if self.verbose > 0:\n iterator = tqdm.tqdm(range(0, self.label_dim))\n # iterator = range(1, self.label_dim)\n\n for i in iterator:\n # Train Classifier i\n y = y_train[:, i]\n\n # Create and fit an instance of chosen classifier with chosen arguments and train it\n clf = self.classifier_type(learning_rate=self.learning_rate,\n batch_size=self.batch_size,\n iterations=self.iterations)\n clf.fit(X, y)\n\n if not _init:\n self.lossHistory = np.zeros(len(clf.lossHistory))\n _init = True\n self.lossHistory = self.lossHistory + np.asarray(clf.lossHistory) / 1000\n\n # Save 
the trained instance\n self.trained.append(clf)\n\n # Add label i to features\n # X = concatenate_csr_matrices_by_columns(X, y)\n\n if self.limit_iterations_for_debug != None:\n if i == self.limit_iterations_for_debug: exit(0)\n\n return self", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def fit(self, X_train, y_train):\n return self", "def fit(self):\n raise NotImplementedError", "def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred", "def fit(self, X, y) :\n \n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n training_set = y.sum()/y.size\n self.probabilities_ = training_set\n return self\n ### ========== TODO : END ========== ###\n \n return self", "def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n # Track changes to subset Z\n is_changed = True\n\n # Z begins as empty set\n subset_Z = []\n while is_changed:\n is_changed = False\n # If subset Z is empty, add the 1st element of feature matrix X\n if not subset_Z:\n subset_Z.append ( 0 )\n\n # Iterate through rows of X\n for row_index in range ( 1, len ( X ) ):\n # If the row is not already in Z\n if row_index not in subset_Z:\n # Run inclusion procedure\n changed = self.get_inclusion ( X, y, row_index, subset_Z)\n # Track changes to subset Z\n is_changed = True if changed else is_changed\n # Fit model over rows of subset Z\n self.knn.fit ( X [ subset_Z ], y [ subset_Z ] )\n # End fit()", "def K_Nearest_Neighbours_Model(train_features, train_labels, k_value=5, algorithm_auto=\"auto\"):\n # create an instance of the KNN SciKit learn class\n model = KNeighborsClassifier(n_neighbors=k_value, algorithm=algorithm_auto)\n # fit the model to the training data and labels\n model.fit(train_features, train_labels.values.ravel())\n # return the .fit() model\n return model", "def fit(self, X):\n self._reset_state()\n # total number of samples\n total_sample_num = X.shape[0]\n # train in an online form\n for i in range(total_sample_num):\n # input sample\n x = X[i, :]\n self.input_signal(x)\n # self.labels_ = self.__label_samples(X)\n self.__classify()\n # plt.show()\n return self", "def fit(self, x):\n pass", "def fit(self, X):\n\n # prochazi jednotlive iterace\n for i in range(self._num_iterations):\n # trenuje kazdy neuron samostatne\n for x in X:\n self._sess.run(self._training, feed_dict={self._X: x, self._iter: i})\n\n # Store a centroid grid for easy retrieval later on\n centroid_grid = [[] for i in range(self._m)]\n self._Wts = list(self._sess.run(self._W))\n self._locations = list(self._sess.run(self._topography))\n for j, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._Wts[j])\n self._centroid_grid = centroid_grid\n\n self._learned = True\n\n if i % 10 == 0:\n print('iteration: ' + str(i) + '/' + str(self._num_iterations))", "def fit(self, X, y, N_epochs = 500):\n\t\tself.GMM_model = GMM(X, self.K)\n\t\tself.GMM_model.fit(tol=1e-3)\n\n\t\t\t#fitting MoE models\n\t\tself.MoE_list = []\n\t\tfor i in range(self.K):\n\t\t\t\t#looking for data in considered cluster\n\t\t\tindices = self.GMM_model.get_cluster_indices(y,i)\n\t\t\tinputs = 
Input(shape=(X.shape[1],))\n\t\t\thidden = DenseMoE(self.D, self.exp_list[i], expert_activation='linear', gating_activation='softmax')(inputs)\n\t\t\tmodel = Model(inputs=inputs, outputs=hidden)\n\t\t\tmodel.compile(optimizer = 'rmsprop', loss = 'mse')\n\t\t\thistory = model.fit(x=X[indices,:], y=y[indices,:], batch_size=64, epochs=N_epochs, shuffle=True, verbose=0)\n\t\t\tprint(\"Train model loss for cluster \"+str(i), model.evaluate(X[indices,:], y[indices,:]))\n\t\t\tself.MoE_list.append(model.copy()) #can result in troubles...\n\n\t\treturn", "def fit(self, data, labels, n_epochs=20):\n self.model.fit(x=data, y=labels, batch_size=self.batch_size, \n validation_split=0.1 if self.early_stopping else None, epochs=n_epochs,\n callbacks=[self.es] if self.early_stopping else None)", "def train_knn(training_data):\n return knnclassifier(training_data, keys, 3)", "def fit(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n **kwargs):\n raise NotImplementedError()", "def fit_full(self, K):\r\n self.K = K.copy()\r\n\r\n #Find mode\r\n self.f_hat = self.rasm_mode(self.K)\r\n\r\n #Compute hessian and other variables at mode\r\n self._compute_likelihood_variables()\r\n\r\n #Compute fake variables replicating laplace approximation to posterior\r\n self._compute_GP_variables()", "def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()", "def fit(self, inp, targ):\n self.model.fit(inp, targ, epochs=1, verbose=0)", "def fit(self):\n # initialize parameters with zeros (≈ 1 line of code)\n weight, intercept = initialize_with_zeros(self._x_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(weight,\n intercept,\n self._x_train,\n self._y_train,\n self.num_iteration,\n self.learning_rate\n )\n\n # Retrieve parameters w and b from dictionary \"parameters\"\n weight = parameters[\"w\"]\n intercept = parameters[\"b\"]\n\n # Predict test/train set examples (≈ 2 lines of code)\n y_prediction_test = predict(weight, intercept, self._x_test)\n y_prediction_train = predict(weight, intercept, self._x_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_train - self._y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(y_prediction_test - self._x_test)) * 100))\n\n return {\"costs\": costs,\n \"Y_prediction_test\": y_prediction_test,\n \"Y_prediction_train\": y_prediction_train,\n \"w\": weight,\n \"b\": intercept,\n \"learning_rate\": self.learning_rate,\n \"num_iterations\": self.num_iteration}", "def fit(self, X, y):\n self.model_x = X\n self.model_y = y", "def fit(self, X, Y):\n ...", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y", "def fit(self, X,y):\n pass", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def train(self, data):\n \n logger('[.] 
Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def fit(self, X, y):\n assert y.shape[1] == self.n_classes and y.shape[0] == X.shape[0]\n self.model.fit(X, y, epochs=self.n_iter, batch_size=self.batch_size, verbose=self.verbose)\n return self", "def fit(self):\n converge = False\n while not converge:\n converge = True\n for xi, yi in zip(self.X, self.y):\n yhat = self.classify(xi)\n if yhat != yi:\n converge = False\n # update model\n self.W += self.lr * yi * xi\n self.b += self.lr * yi * 1", "def fit(self, X, y):\n self.classifiers = []\n self.label_count = len(y[0])\n self.sample_models()\n for i in xrange(self.model_count):\n classifier = copy.deepcopy(self.classifier)\n y_subset = self.generate_data_subset(y,self.label_sets[i])\n classifier.fit(X,y_subset)\n self.classifiers.append(classifier)\n\n return self", "def nn_fit(x, y, input_size, hidden_size, num_classes, learning_rate, epochs):\n\n # initialize the model\n model = NNModel(input_size, hidden_size, num_classes)\n\n # our error/loss function\n criterion = nn.BCEWithLogitsLoss()\n\n # define our SGD optimizer\n optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate)\n\n # loop over our epochs, similar to our previous implementations\n for epoch in range(epochs):\n\n # define our feature and response variables\n features = Variable(torch.from_numpy(x).float())\n labels = Variable(torch.from_numpy(y).float())\n\n # clear the gradients\n optimiser.zero_grad()\n\n # calculate the predicted values\n predictions = model.forward(features)\n\n # calculate our loss\n loss = criterion(predictions, labels)\n\n # implement our gradient-based updates to our\n # parammeters (putting them \"back\" into the model\n # via a \"backward\" update)\n loss.backward()\n optimiser.step()\n\n return model", "def find_best_k(x_train, y_train, ks):\n params = {'n_neighbors': ks}\n knn = neighbors.KNeighborsRegressor()\n model = GridSearchCV(knn, params, cv=5)\n model.fit(x_train, y_train)\n best_k = model.best_params_\n return best_k", "def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))", "def do_knn(x_data):\n return True", "def fit(self, X):\n raise NotImplementedError", "def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. 
Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the 
level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self", "def fit(self):\n raise NotImplementedError # pragma: no cover", "def fit(self):\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n\r\n while not self.converged() or self.training_iteration == 0:\r\n print('Iteration: ', self.training_iteration)\r\n\r\n self.compute_gradient_for_all()\r\n self.upd_theta()\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n self.training_iteration += 1", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def knn_prediction(X, y, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def _fit(self, X, column, k=10, is_categorical=False):\n clf = None\n if not is_categorical:\n clf = neighbors.KNeighborsRegressor(n_neighbors=k)\n else:\n clf = neighbors.KNeighborsClassifier(n_neighbors=k)\n # use column not null to train the kNN classifier\n missing_idxes = np.where(pd.isnull(X[:, column]))[0]\n if len(missing_idxes) == 0:\n return None\n X_copy = np.delete(X, missing_idxes, 0)\n X_train = np.delete(X_copy, column, 1)\n # if other columns still have missing values fill with mean\n col_mean = None\n if not is_categorical:\n col_mean = np.nanmean(X, 0)\n else:\n col_mean = np.nanmedian(X, 0)\n for col_id in range(0, len(col_mean) - 1):\n col_missing_idxes = np.where(np.isnan(X_train[:, col_id]))[0]\n if len(col_missing_idxes) == 0:\n continue\n else:\n X_train[col_missing_idxes, col_id] = col_mean[col_id]\n y_train = X_copy[:, column]\n # fit classifier\n clf.fit(X_train, y_train)\n return clf", "def train(self, X, y):\n self.model.fit(X, y)", "def fit(self, X, epochs=50):\n self.clusters = [[] for _ in range(self.k)]\n for i in range(X.shape[0]):\n index = random.randint(0, self.k - 1)\n self.clusters[index].append(X[i])\n self.sample_in_cluster.append(index)\n for e in range(epochs):\n #beregn nye centers\n self.estimate_centers()\n #nullstill clusters\n self.reset_clusters()\n #legg til alle punkter på nytt i clusters\n self.make_clusters(X)\n if self.changed == False:\n break", "def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))", "def fit(self, X, y):\n # Code to fit the model.\n\n train_stuff = self._vectorizer.fit_transform(X, y)\n\n self._classifier.fit(train_stuff, y = y)\n\n\n return self", "def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights", "def fit(self, X, y) :\n\n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n # create a dictionary of frequencies and convert to probabilities\n frequencies = Counter(y)\n self.probabilities_ = {key:float(value)/len(y) for (key,value) in frequencies.items()}\n ### ========== TODO : END ========== ###\n\n return self", "def fit(self, X, Y):\n np.random.seed(40)\n self.num_samples=X.shape[0]\n self.layers_sizes.insert(0,X.shape[1])\n self.initialize_parameters()\n 
variable=self.num_epochs//5\n\n # loop for epochs\n for vv in range(self.num_epochs):\n # creating batches of dataset of specified batch size\n X,Y=shuffle(X,Y,random_state=vv)\n num_batches=X.shape[0]//self.batch_size\n train_x=np.vsplit(X,num_batches)\n train_y=np.vsplit(Y,num_batches)\n train_cost=0\n \n for i in range(num_batches):\n # iterating over batches and applying forward and backward propagation\n # and determining training cost (cross entropy loss) for every batch\n # and averaging them to give a generalised loss\n A,d_collection=self.forward(train_x[i])\n train_cost+=(-np.mean(train_y[i]*np.log(np.transpose(A))))/num_batches\n derivatives=self.backward(train_x[i],train_y[i],d_collection)\n\n self.weight_update(derivatives)\n \n if vv%variable==0:\n print(\"Accuracy score:\",self.score(X,Y))\n \n # adding both training and testing losses in a list to plot in further ques\n self.training_loss_values.append(train_cost)\n test_cost=-np.mean(self.YTEST*np.log(np.transpose(self.predict_proba(self.XTEST))))\n self.testing_loss_values.append(test_cost)\n return self", "def fit(self, x):\n raise NotImplementedError()", "def fit(self, X, y=None, **fit_params):\n dataset = self.get_dataset(X, y)\n (_n_objects, self.n_features_) = dataset[0][0].shape\n NeuralNet.fit(self, X=dataset, y=None, **fit_params)", "def kNN_train(self, x_train, y_train, x_test, k = 5, processing = None, distMethod = \"Manhattan\"):\n y_test = list()\n\n if processing == \"Scalar\":\n # print(\"Preprocessing = Scalar\")\n stdScalar = preprocessing.StandardScaler().fit(x_train)\n x_train = stdScalar.transform(x_train)\n x_test = stdScalar.transform(x_test)\n\n elif processing == \"MinMax\":\n\n # print(\"Preprocessing = MinMax\")\n mmScalar = preprocessing.MinMaxScaler()\n x_train = mmScalar.fit_transform(x_train)\n x_test = mmScalar.fit_transform(x_test)\n\n elif processing == \"None\":\n self.true = True\n # print(\"No Preprocessing\")\n\n else:\n print(\"wrong processing\")\n exit()\n\n for i in range(0, len(x_test)):\n y_test_temp = list()\n zeroCount = 0\n oneCount = 0\n\n # find distance of a instance in test test to all instances in training set\n for j in range(0, len(x_train)):\n if distMethod == \"Manhattan\":\n y_test_temp.append(self.manhattan(x_train[j], x_test[i]))\n elif distMethod == \"Euclidean\":\n y_test_temp.append(self.euclidean(x_train[j], x_test[i]))\n else:\n print \"something wrong with distance calculation\"\n exit()\n\n # take indices of k nearest points\n # print y_test_temp\n temp = np.asarray(y_test_temp).argsort()[:k]\n # check class of each of k nearest points\n for tmp in temp:\n if y_train[tmp] == 0:\n zeroCount += 1\n elif y_train[tmp] == 1:\n oneCount += 1\n else:\n print(\"something wrong in counting\")\n\n # classify\n if zeroCount >= oneCount:\n y_test.append(int(0))\n elif oneCount > zeroCount:\n y_test.append(int(1))\n else:\n print(\"somethign wrong\")\n\n # print y_test\n return y_test", "def fit(self, X):\n self.n_samples = X.shape[0]\n self.u = random.uniform(key=self.key, shape=(\n self.n_samples, self.n_clusters))\n self.u = self.u / np.tile(self.u.sum(axis=1)\n [np.newaxis].T, self.n_clusters)\n for iteration in range(self.max_iter):\n u_old = self.u.copy()\n self.centers = FCM._next_centers(X, self.u, self.m)\n self.u = self.__predict(X)\n # Stopping rule\n if np.linalg.norm(self.u - u_old) < self.error:\n break", "def fit(self, data, labels):\n self.clf.fit(data, labels)", "def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) 
# drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions", "def fit(self):\n raise NotImplementedError('')", "def knn(X, Y, seed):\n model = neighbors.KNeighborsClassifier(algorithm='auto')\n param_grid = {'n_neighbors': [1, 5, 9, 13, 17, 21, 25, 29, 33, 37], 'weights': ['uniform', 'distance']}\n \n # Grid search on the parameters, to find the best score.\n k = 3\n split = StratifiedShuffleSplit(n_splits=k, random_state=seed)\n search = GridSearchCV(model, param_grid, cv=split, scoring=\"f1\")\n search.fit(X,Y)\n\n score = search.best_score_\n model = search.best_estimator_\n print(\"score={}\\nModel: {}\".format(score, model))\n \n return model", "def fit(self,\n X_train,\n y_train, \n X_test, \n y_test):\n \n #instantiate path_model_dirs dictionary so we can know where the models are saved\n self.path_model_dirs = {}\n\n for key in self.models_dict.keys():\n \n if self.verbose >=1: print('\\n----',key,'----')\n\n #define model directory\n path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)\n self.path_model_dirs[key] = path_model_dir\n if self.verbose >=1: print('path_model_dir:',path_model_dir)\n \n model_type = type(self.models_dict[key]['model'])\n if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):\n path_file = _os.path.join(path_model_dir,'model_dict.dill')\n elif 'Net' in key:\n path_file = _os.path.join(path_model_dir,'best_params_.dill')\n\n if self.retrain or _os.path.isfile(path_file)==False:\n self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key], \n X_train, y_train, \n X_test, y_test,\n path_model_dir)\n\n else: #reload previously trained model\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n #check kwargs for epochs\n epochs = 100\n for item in self.kwargs.items():\n if 'epochs' in item[0]: epochs = item[1]\n self.models_dict[key] = self.load_NeuralNet(path_model_dir, \n X_train, y_train, \n epochs)\n\n y_pred = self.models_dict[key]['best_model'].predict(X_test)\n\n if 'Net' not in key:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, 
y_test)\n else:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose =0)\n \n if self.verbose >=1:\n print('\\tbest_cv_score:',self.models_dict[key]['best_cv_score'])\n print('\\tbest_pred_score:',self.models_dict[key]['best_pred_score'])\n\n for metric_key in self.metrics.keys():\n if self.metrics[metric_key] !=None:\n try:\n self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)\n print('\\t',metric_key,':',self.models_dict[key][metric_key])\n except Exception as e:\n print('Exception occured for',metric_key,':',str(e))\n\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n model_dict_subset = self.models_dict[key].copy()\n for key in self.models_dict[key].keys():\n if key not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):\n model_dict_subset.pop(key)", "def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass", "def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError", "def knn(X_tr_sc, n_neighbors, radius): \n\n neigh = NearestNeighbors(n_neighbors, radius,metric='euclidean')\n model = neigh.fit(X_tr_sc) \n \n return model", "def train_model(evidence, labels):\n # create a model to represent the k-nearest neighbor model and return the fitted model\n model = KNeighborsClassifier(n_neighbors=1)\n return model.fit(evidence, labels)", "def fit(self, X_train, y_train, **kwargs):\n X_train[\"label_prediction\"] = self._base_model.predict(X_train)\n\n self._features = X_train.columns.values\n self._model.fit(X_train, y_train, **kwargs)", "def fit(self, train_matrix, train_label, sample_weight):\r\n raise NotImplementedError", "def fit(self, X_train, y_train):\n for i in range(self.N):\n h = RandomDecisionTree(candidate_splits=self.candidate_splits, depth=self.max_depth)\n h = h.fit(*self.bootstrap(X_train, y_train))\n self.learners.append(h)", "def fit(self, X, Y):\n K = len(np.unique(Y))\n p = np.shape(X)[1]\n n = np.shape(X)[0]\n conditional = []\n # Class conditional distribution parameters (Laplace smoothing)\n prior = []\n # Class prior distribution parameters (MLE)\n for label in xrange(K):\n indices = np.where(Y == label + 1)[0]\n temp_split = X[indices, :]\n temp_count = np.shape(temp_split)[0]\n prior.append(1. * temp_count / n)\n temp_sum = np.apply_along_axis(sum, 0, temp_split.toarray())\n conditional.append(1. * (1 + 1. * temp_sum) / (2 + temp_count))\n self.model_prior = prior\n self.model_conditional = conditional\n return self", "def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)", "def fit(self, x, y):\n raise NotImplementedError('Subclass of LinearModel must implement fit method.')", "def fit(self, X, y, **fit_params):\n ..." ]
[ "0.82824767", "0.7297423", "0.7242588", "0.7187846", "0.69953674", "0.69056904", "0.689061", "0.6822247", "0.6819599", "0.67986625", "0.67709565", "0.66812265", "0.66537255", "0.66494197", "0.66305053", "0.66141015", "0.6599681", "0.65994424", "0.6562805", "0.654726", "0.65240186", "0.6522693", "0.65151376", "0.65135604", "0.64931494", "0.6492516", "0.6481181", "0.6469191", "0.6448132", "0.6447028", "0.6444352", "0.6442844", "0.64372265", "0.64351004", "0.64333385", "0.6413151", "0.6398177", "0.63828963", "0.63750917", "0.6372392", "0.636778", "0.6364673", "0.6363981", "0.635704", "0.63495004", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336798", "0.6336292", "0.6336292", "0.6336292", "0.63260835", "0.62949985", "0.6294983", "0.6285882", "0.6276956", "0.62454903", "0.6242049", "0.62417966", "0.6233901", "0.61921746", "0.6191304", "0.61778134", "0.61731285", "0.6172461", "0.61717385", "0.6159848", "0.61523473", "0.61511886", "0.6148723", "0.6148496", "0.6146532", "0.6144688", "0.6133097", "0.6130923", "0.6130562", "0.6129253", "0.6121714", "0.6106462", "0.61059797", "0.6100394", "0.6095953", "0.6090798", "0.6084521", "0.60835195", "0.60817397", "0.60757923", "0.6063015", "0.6061863", "0.60519254", "0.6041853", "0.60350114", "0.60237336" ]
0.7031433
4
Return the accuracy attained by the knn on the test data set.
def score_one(self, test_data): test_in, test_labels = self._split_inputs_outputs(test_data) correct = 0 total = 0 for i, test_input in enumerate(test_in): prediction = self.model.predict(test_input.reshape(1,-1)) if prediction[0] == test_labels[i]: correct+=1 total+=1 return float(correct)/total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def findOveralAccuracy(trainData,testData):\r\n kNNClassifier = kNN(trainData)\r\n \r\n All_Predictions = kNNClassifier.classify(testData,k=5)\r\n \r\n reference_dictionary = testData.dataDict['Species']\r\n\r\n Overall_Accuracy = 100*sum(reference_dictionary== All_Predictions)/len(All_Predictions)\r\n \r\n return All_Predictions, Overall_Accuracy", "def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\n\t\treturn self.accuracy_", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = 
[np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def get_test_accuracy(model, X_test, y_test):\n # Make predictions - test accuracy\n test_pred = model.predict(X_test)\n score = accuracy_score(test_pred, y_test)\n print(\"Test Accuracy:\", score)\n\n return test_pred", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N", "def accuracy(self):\r\n return self._feature.attributes[self._schema.accuracy]", "def get_accuracy(self, k=None):\n k = 1 if k is None else k\n n_correct = 0\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n is_correct = False\n for candidate in answer[:k]:\n if candidate in correct_set:\n is_correct = True\n break\n n_correct += int(is_correct)\n \n return n_correct / len(self.test_queries)", "def accuracy(pred, target):\n N = pred.shape[0]\n return (pred == target).sum() * 1.0 / N", "def accuracy(self):", "def getaccuracy(features: ndarray, target: ndarray, trained_model) -> float:\n predictions = trained_model.predict(features)\n\n accuracy = accuracy_score(target, predictions, normalize=True)\n\n return accuracy", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. 
/ batch_size for k in topk]", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def accuracy(predictions, targets):\n return accuracy", "def accuracy(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return (conf_matrix[0][0]+conf_matrix[1][1])/(conf_matrix[0][0] + conf_matrix[0][1]\\\n + conf_matrix[1][0] + conf_matrix[1][1])", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def accuracy(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n return (y_true == y_pred).mean()", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def accuracy(predicted, ground_truth):\n predicted_labels_decoded = np.argmax(predicted, axis=1)\n ground_truth_labels_decoded = np.argmax(ground_truth, axis=1)\n correct_rate = [1 if pred == truth else 0 for (pred, truth) in\n zip(predicted_labels_decoded, ground_truth_labels_decoded)]\n accuracy = sum(correct_rate) / ground_truth_labels_decoded.size\n return accuracy * 100", "def accuracy(self, X_train, y_train):\n y_train_pred = self.predict(X_train)\n diffs = y_train_pred - y_train\n count = 0.\n for i in range(y_train.shape[0]):\n if diffs[i] != 0:\n count+=1\n return 100 - count*100/y_train.shape[0]", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def accuracy(output, target): # , topk=(1,)):\n correct = 0\n batch_size = target.size(0)\n for i in range(batch_size):\n tar = target[i].data.cpu().numpy()\n pred = output[i].data.cpu().numpy()\n if (tar) == np.argmax(pred):\n correct += 1\n return float(correct/batch_size)", "def accuracy(gt, pred):\n \n return np.mean(gt == pred)", "def accuracy(output, target, topk=(1,)):\n\tmaxk = max(topk)\n\tbatch_size = target.size(0)\n\n\t_, pred = output.topk(maxk, 1, True, True)\n\tpred = pred.t()\n\tcorrect = pred.eq(target.view(1, -1).expand_as(pred))\n\n\tres = []\n\tfor k in topk:\n\t\tcorrect_k = correct[:k].view(-1).float().sum(0)\n\t\tres.append(correct_k.mul_(100.0 / batch_size))\n\treturn res", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size 
= target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n\r\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def accuracy(y_pred, y_actual, topk=(1, )):\n maxk = max(topk)\n batch_size = y_actual.size(0)\n\n _, pred = y_pred.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(y_actual.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n pred = pred.type_as(target)\n target = target.type_as(pred)\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].contiguous().view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, 
True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 
1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def accuracy(actual, predicted):\n return np.sum(predicted == actual) / actual.shape[0]", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n 
res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n 
correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy_score(y_true, y_pred):\n\ttp, fn, fp, tn = confusion_matrix(y_true, y_pred, table_show=False)\n\n\treturn (tp+tn) / (tp+tn+fn+fp)", "def get_accuracy(pred, test_label, regression= \"logistic\"):\n if regression == \"multiclass\":\n pred_max = np.argmax(pred, axis=1)\n gt_max = np.argmax(test_label, axis=1)\n acc = np.sum(pred_max == gt_max)*100.0/pred.shape[0]\n elif regression == \"logistic\" or regression == \"probit\":\n if pred.ndim == 2:\n pred = pred[:,0]\n pred[pred >= 0.5] = 1.0\n pred[pred < 0.5] = 0.0\n acc = np.sum(pred == test_label)*100.0/pred.shape[0]\n\n return acc", "def test_accuracy(y, tx, w):\n labels = predict_regression_labels(w, tx)\n \n return (labels==y).sum()/len(y)", "def accuracy(output, target, topk=(1, 5)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def get_accuracy(self, predicted_y, actual_y, log_tests=False):\n if log_tests:\n for i in range(actual_y.shape[0]):\n print 'predicted = {0}, actual = {1}'.format(predicted_y[i], 
actual_y[i])\n return float(sum(predicted_y == actual_y)) / predicted_y.shape[0]", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(1. / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True) # pred是top k的索引值\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # target每个样本只有一个值,表示具体类别值,expand之后比较是否相等,相等的就是对的\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) # top几的分类正确数量累加,然后除以batch_size就是准确率\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res, pred", "def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc" ]
[ "0.81783843", "0.807551", "0.80026567", "0.8001903", "0.78935206", "0.77839744", "0.7777315", "0.7738132", "0.7703403", "0.76944774", "0.7688138", "0.76269644", "0.7613137", "0.7607058", "0.75859165", "0.757926", "0.7487052", "0.74865884", "0.7449901", "0.7419433", "0.73826903", "0.7374445", "0.7367807", "0.736219", "0.7359248", "0.7349097", "0.73135257", "0.73052466", "0.7278306", "0.7270204", "0.72568625", "0.72486", "0.7227708", "0.72216123", "0.7214568", "0.7202854", "0.72026074", "0.72026074", "0.7191624", "0.71915644", "0.71865654", "0.7179947", "0.7142064", "0.7136794", "0.71309555", "0.71275884", "0.71275884", "0.71200454", "0.7119808", "0.71188277", "0.71188", "0.71061075", "0.7105061", "0.7094871", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.709279", "0.7090395", "0.70877326", "0.7086838", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.7085098", "0.70841277", "0.7083819", "0.7083819", "0.70764995", "0.70760036", "0.7075415", "0.70695686", "0.70670676", "0.70660084", "0.7064988", "0.70639247", "0.7058385", "0.7055393", "0.70550466", "0.7054841" ]
0.0
-1
Use 10-fold CV to produce an average score
def score(self):
    splits = 10
    score = 0
    kf = KFold(n_splits=splits, shuffle=True)
    kf.get_n_splits(self.data)
    for train_ind, test_ind in kf.split(self.data):
        train = [self.data[ind] for ind in train_ind]
        test = [self.data[ind] for ind in test_ind]
        self.model = self._fit(train)
        temp_score = self.score_one(test)
        score += temp_score
    return score/float(splits)
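A minimal, self-contained sketch of the same 10-fold averaging pattern (illustrative only, not part of the dataset record): Ridge regression on the scikit-learn diabetes data is an assumed stand-in for the record's unspecified self._fit / self.score_one.

from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold

X, y = load_diabetes(return_X_y=True)

splits = 10
total = 0.0
kf = KFold(n_splits=splits, shuffle=True, random_state=0)
for train_ind, test_ind in kf.split(X):
    # fit on the nine training folds, score on the held-out fold
    model = Ridge().fit(X[train_ind], y[train_ind])
    total += r2_score(y[test_ind], model.predict(X[test_ind]))

print(total / float(splits))  # average score over the 10 folds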
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cv_score(clf, x, y, score_func):\r\n result = 0\r\n nfold = 5\r\n for train, test in KFold(y.size, nfold): # split data into train/test groups, 5 times\r\n clf.fit(x[train], y[train]) # fit\r\n result += score_func(clf, x[test], y[test]) # evaluate score function on held-out data\r\n return result / nfold # average\r", "def trainaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n train_x, train_y, valid_x, valid_y=get_next_train_valid(i)\r\n w,b=svmfit(i,C)\r\n y_predict=predict(train_x,w,b)\r\n train_y=train_y.reshape(len(train_y),1)\r\n k=np.sum(np.absolute(y_predict-train_y))/2\r\n n=len(train_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def testaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n w,b=svmfit(i,C)\r\n y_predict=predict(test_x,w,b)\r\n k=np.sum(np.absolute(y_predict-test_y))/2\r\n n=len(test_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def algo_CVmetrics(classifier_object, X_train, Y_train):\r\n \r\n cv = RepeatedStratifiedKFold(n_splits = 5, n_repeats = 3, random_state = seed_custom)\r\n \r\n metricslist = {'f2': make_scorer(metrics.fbeta_score, beta = 2), \r\n 'balacc': make_scorer(metrics.balanced_accuracy_score),\r\n 'precision': make_scorer(metrics.precision_score),\r\n 'recall': make_scorer(metrics.recall_score)}\r\n \r\n cv_results = cross_validate(classifier_object, X_train, Y_train, cv = cv, scoring = metricslist, return_estimator = True)\r\n \r\n f2_mean = np.mean(cv_results['test_f2'])\r\n f2_std = np.std(cv_results['test_f2'])\r\n \r\n balacc_mean = np.mean(cv_results['test_balacc'])\r\n balacc_std = np.std(cv_results['test_balacc'])\r\n\r\n precision_mean = np.mean(cv_results['test_precision'])\r\n precision_std = np.std(cv_results['test_precision'])\r\n \r\n recall_mean = np.mean(cv_results['test_recall'])\r\n recall_std = np.std(cv_results['test_recall'])\r\n \r\n scorebox = pd.DataFrame(np.zeros((1,8)), columns = list(['F2-Score Mean', 'F2-Score STD', 'Balanced Accuracy Mean', 'Balanced Accuracy STD',\r\n 'Precision Mean', 'Precision STD', 'Recall Mean', 'Recall STD']))\r\n \r\n scorebox.iloc[0,0] = f2_mean\r\n scorebox.iloc[0,1] = f2_std\r\n scorebox.iloc[0,2] = balacc_mean\r\n scorebox.iloc[0,3] = balacc_std\r\n scorebox.iloc[0,4] = precision_mean\r\n scorebox.iloc[0,5] = precision_std\r\n scorebox.iloc[0,6] = recall_mean\r\n scorebox.iloc[0,7] = recall_std \r\n \r\n scorebox = np.round(scorebox, 3)\r\n \r\n print(\"Model has a mean CV balanced accuracy of {0}, (Std: {1})\".format(round(balacc_mean,3), round(balacc_std,3)))\r\n print(\"Model has a mean CV F2_Score of {0}, (Std: {1})\".format(round(f2_mean,3), round(f2_std,3)))\r\n print(\"Model has a mean CV Precision of {0}, (Std: {1})\".format(round(precision_mean,3), round(precision_std,3)))\r\n print(\"Model has a mean CV Recall of {0}, (Std: {1})\".format(round(recall_mean,3), round(recall_std,3)))\r\n \r\n return scorebox", "def quick_score(clf, X, y, cv=5, n_jobs=20):\n\n return jjcross_val_score(clf, X, y, mean_absolute_error, cv, n_jobs=n_jobs).mean()", "def svm_cv(self, nsplits: int = 5) -> (float, float, float):\r\n c_cand = [0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 100]\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for c in c_cand:\r\n acc_result_c = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = SVC(C=c, 
gamma='auto').fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result_c.append(binary_acc(y_test, y_predict))\r\n acc_result.append(np.mean(acc_result_c))\r\n best_c = c_cand[acc_result.index(max(acc_result))]\r\n return max(acc_result), np.std(acc_result), best_c", "def get_mean_cv_score(model, X, y, scoring, cv, n_jobs, verbose):\n \n mean_cv_score = cross_val_score_torch(model, X, y, scoring, cv, n_jobs, verbose).mean()\n return mean_cv_score", "def roc_score(x, y, model):\n # We use k-fold cross-validation and average the scores.\n kfold = KFold(n_splits=5)\n scores = []\n for train_index, test_index in kfold.split(x):\n x_train = x[train_index]\n y_train = y[train_index]\n x_test = x[test_index]\n y_test = y[test_index]\n score = roc_auc_score(\n y_test, make_prediction(x_train, y_train, x_test, model))\n scores.append(score)\n return np.mean(scores)", "def cv(data, folds, model):\n def rmsle(predicted, actual):\n # Root Mean Squared Logarithmic Error\n return mean_squared_error(\n np.log(predicted+1),\n np.log(actual+1)\n ) ** 0.5\n\n errors = []\n print \" Cross Validation in progress...\"\n kf = cross_validation.KFold(n=len(data.index), n_folds=folds)\n for i, (train_index, validation_index) in enumerate(kf):\n print ' F%d.' % i\n train = data.iloc[train_index]\n validation = data.iloc[validation_index]\n\n model.fit(train)\n prediction = model.predict(validation)\n actual = data.iloc[validation_index]['count'].as_matrix()\n error = rmsle(prediction, actual)\n errors.append(error)\n return np.mean(errors)", "def cross_validation(features, target, n_neighbors=5, n_folds=5):\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n cv_scores = cross_val_score(clf, features, target, cv=n_folds)\n # print each cv score (accuracy) and average them\n print('Cross Validation Scores Mean: %.2f' % (np.mean(cv_scores) * 100))", "def validaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n train_x, train_y, valid_x, valid_y=get_next_train_valid(i)\r\n w,b=svmfit(i,C)\r\n y_predict=predict(valid_x,w,b)\r\n valid_y=valid_y.reshape(len(valid_y),1)\r\n k=np.sum(np.absolute(y_predict-valid_y))/2\r\n n=len(valid_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def cv_performance(posTrainData,negTrainData, num_folds):\n length = len(negTrainData)\n splits = split_cv(length, num_folds)\n accuracy_array = []\n for split in splits:\n accuracy = 0\n train_pos = []\n train_neg = []\n test_neg = []\n test_pos = []\n for x in split.train:\n train_pos.append(posTrainData[x])\n train_neg.append(negTrainData[x])\n for x in split.test:\n test_pos.append(posTrainData[x])\n test_neg.append(negTrainData[x])\n nb = Nb(train_pos,train_neg)\n confusion=nb.confusion_matrix(test_pos,test_neg)\n accuracy = nb.accuracy(confusion)\n accuracy_array.append(accuracy)\n\n return accuracy_array", "def cross_validation_accuracy(clf, X, labels, k):\n ###TODO\n\n cv = KFold(n=len(labels),n_folds=k)\n accuracies = []\n\n \n for train_indices, test_indices in cv:\n \n clf.fit(X[train_indices], labels[train_indices])\n predicted = clf.predict(X[test_indices])\n acc = accuracy_score(labels[test_indices], predicted)\n accuracies.append(acc)\n \n #print('accuracies = ',accuracies) \n #avg = np.mean(accuracies,dtype=np.float64)\n return(np.mean(accuracies,dtype=np.float64))", "def random_search_cv(self, X_test, n_cv=5, n_folds_cv=5, evaluation_metric='top30'):\n # DON'T KNOW IF WE WILL IMPLEMENT IT\n # We may implement a method on a per-classifier bases\n # 
depending on if the classifier is based on a Scikit-learn classifier\n # or not\n pass", "def fitting_scoring(features, cv=5, verbose=False, is_RFE_mode=False, n_dims_RFE=1):\n # N-fold cross-validation\n num_fold = cv\n accuracy = [0] * num_fold\n for i in range(num_fold):\n training_set = list()\n training_labels = list()\n testing_set = list()\n testing_labels = list()\n for family in features:\n feature_mat = features.get(family)\n if verbose: print(family, \"sample size:\", len(feature_mat))\n\n fold_start = i * int(len(feature_mat) / num_fold)\n fold_end = fold_start + int(len(feature_mat) / num_fold) - 1\n\n # separate training and testing set\n for j in range(len(feature_mat)):\n if fold_start <= j <= fold_end:\n testing_set.append(feature_mat[j])\n testing_labels.append(family)\n else:\n training_set.append(feature_mat[j])\n training_labels.append(family)\n\n p_res = None\n X_new = None\n X_mask = None\n if is_RFE_mode:\n clf = svm.SVC(kernel='linear')\n clf_reduced = RFE(clf, n_dims_RFE, step=1)\n clf_reduced = clf_reduced.fit(training_set, training_labels)\n X_new = clf_reduced.transform(training_set)\n X_mask = clf_reduced.get_support()\n p_res = clf_reduced.predict(testing_set)\n else:\n clf = svm.SVC()\n clf.fit(training_set, training_labels)\n p_res = clf.predict(testing_set)\n\n accuracy[i] = 0\n for j in range(len(p_res)):\n if p_res[j] == testing_labels[j]:\n accuracy[i] += 1\n accuracy[i] = (accuracy[i] / len(p_res)) * 100\n\n if is_RFE_mode:\n if verbose: print('n_dims:', n_dims_RFE, accuracy)\n return np.mean(accuracy), X_new, X_mask\n\n return np.mean(accuracy)", "def cv_5_fold(dataFrame):\n dataframe_collection = {}\n i = 0\n j = 0\n l = 0\n guessed_right = 0\n k = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39]\n\n k_values = []\n # array to store the accuracy evaluation for each number of K\n accuracy_values = {}\n\n myDict = {}\n for j in range(len(k)): # for all values of K neighbour\n\n print(k[j])\n predicted_right = 0\n total_number = 0\n five_accuracies = []\n for i in range(0, 5):\n #aggregating dataframes by fold - e.g. 
1 fold becomes test dataframe; 2,3,4,5 folds become one training dataframe\n trainingDataFrame = dataFrame.loc[dataFrame[15] != (i / 4.00)]\n trainingDataFrame = trainingDataFrame.drop([15], axis=1).reset_index(drop=True)\n testDataFrame = dataFrame.loc[dataFrame[15] == (i / 4.00)]\n testDataFrame = testDataFrame.drop([15], axis=1).reset_index(drop=True)\n\n # output is an array of predicted income values for testDataFrame\n output = knn(trainingDataFrame, testDataFrame, k[j])\n\n # for every fold validation loop calculate the accuracy:\n for instance in range(len(testDataFrame)):\n # checking number of right predictions\n if (output[instance] == testDataFrame[14].iloc[instance]):\n predicted_right += 1.00\n total_number += 1.00\n\n # calculate accuracy as percentage of number of prediction divided by total\n accuracy = (predicted_right / total_number) * 100.0\n # add acccuracies for each of the 5 fold tests to an array\n five_accuracies.append(accuracy)\n\n # PROVIDE FINAL EVALUATION FOR K = J, BY FINDING OUT AVERAGE ACCURACY OF THE FIVE FOLD LOOPS:\n evaluation = 0.0\n for accuracy in range(len(five_accuracies)):\n evaluation += five_accuracies[accuracy]\n\n evaluation = evaluation / 5\n\n accuracy_values.update({k[j]: evaluation})\n\n accuracy_values = collections.OrderedDict(sorted(accuracy_values.items()))\n\n # compute which number of neigbors garners greatest accuracy:\n maxAccuracy = 0\n best_neighbour = 0\n # loop through dictionary values:\n for v in accuracy_values.items():\n # if the value is greater than the current maximum, make it the maximum\n if (v[1] > maxAccuracy):\n maxAccuracy = v[1]\n best_neighbour = v[0]\n\n print(\"Max accuracy \", maxAccuracy)\n print(\"Best Neighbor: \", best_neighbour)\n\n # make a text file containing the K-number and associated accuracy:\n str_x = \"k value | accuracy\" + \"\\n\"\n for k, v in accuracy_values.items():\n str_x += str(k) + \" | \" + str(v) + \"\\n\"\n print(str_x)\n\n text_file = open(\"grid.results.txt\", 'w')\n text_file.write(str_x)\n text_file.close()", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def run_experiment ( X, y, model_call, param_grid = None, scoring_func = accuracy,cv = KFoldStratifiedCV ( number_of_folds = 5 ),):\n\n scores = []\n iteration = 0\n # Iterate through the split\n for train, test in cv.split ( y ):\n # If first iteration and k values are passed, get the best one\n if iteration == 0 and param_grid:\n k = choose_k (\n X [ train ], y [ train ], model_call, param_grid, scoring_func, cv = cv )\n logger.info ( f\"Choosing k= { k } \" )\n else:\n # Defaults to 1 for condensed.\n k = 1\n\n iteration += 1\n\n # Instantiate the model with the value of k\n model = model_call ( k = k )\n\n # Standardize the data\n standardizer = Standardizer ( mean = True, std = True )\n\n # Fit the model\n model.fit ( X = standardizer.fit_transform ( X [ train ] ), y = y [ train ] )\n\n # make test set predictions\n y_pred = model.predict ( X = 
standardizer.transform ( X [ test ] ) )\n\n # Append the score\n scores.append ( scoring_func ( y [ test ], y_pred ) )\n \n logger.info ( f\"Avg Score: { np.mean ( scores ) } \" )\n \n return model\n # End run_experiment()", "def cross_validation_score(self, model, x, y, cv, groups):\n losses = []\n for train_idx, test_idx in cv.split(x, y, groups):\n x_tr, x_te = x[train_idx], x[test_idx]\n y_tr, y_te = y[train_idx], y[test_idx]\n\n model.fit(x_tr, y_tr)\n if self.is_classier:\n test_preds = model.predict_proba(x_te)[:, 1]\n else:\n test_preds = model.predict(x_te)[:,]\n loss = self.loss_metric(y_true=y_te, y_pred=test_preds)\n losses.append(loss)\n return np.mean(losses)", "def perform_cv_fold(self, algo, fold, folds):\n # TODO: this is only done for hyperparameter optimization and is not\n # part of the OpenML specification. The OpenML specification would\n # like to have the hyperparameter evaluation inside the evaluate\n # performed by the target algorithm itself. Hyperparameter\n # optimization on the other hand needs these both things to be decoupled\n # For being closer to OpenML one could also call evaluate and pass\n # everything else through kwargs.\n if self.task_type != \"Supervised Classification\":\n raise NotImplementedError(self.task_type)\n\n if self.estimation_procedure[\"type\"] != \\\n \"crossvalidation with holdout\":\n raise NotImplementedError(self.estimation_procedure[\"type\"] )\n\n if self.estimation_procedure[\"parameters\"][\"stratified_sampling\"] != \\\n 'true':\n raise NotImplementedError(self.estimation_procedure[\"parameters\"][\"stratified_sampling\"])\n\n if self.evaluation_measure != \"predictive_accuracy\":\n raise NotImplementedError(self.evaluation_measure)\n\n ########################################################################\n # Test folds\n train_indices, test_indices = self.get_train_test_split()\n\n ########################################################################\n # Crossvalidation folds\n train_indices, validation_indices = self.get_validation_split(fold)\n\n X, Y = self.get_dataset()\n\n algo.fit(X[train_indices], Y[train_indices])\n\n predictions = algo.predict(X[validation_indices])\n accuracy = sklearn.metrics.accuracy_score(Y[validation_indices], predictions)\n return accuracy", "def report_cv_stats(n_fold, model, samples, labels, comment=None):\n\n # compute n-fold cross validation accuracy for model\n accuracy = cross_validation.cross_val_score(model, samples, labels, cv=n_fold)\n\n # compute mean and standard deviation\n accuracy_m = accuracy.mean()\n accuracy_s = accuracy.std()\n\n text = \"\"\n if comment:\n text = \"(\" + comment + \")\"\n\n print(\"Accuracy\" + text + \": %0.2f (+/- %0.2f)\" % (accuracy_m * 100, accuracy_s * 100 * 2))\n\n return accuracy_m, accuracy_s", "def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: {score[i]}')\r\n\r\n 
i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def accuracy(clf, x, y, cv=5):\n print_classification_info(clf, x, y)\n return cross_val_score(clf, x, y, cv=cv).mean() * 100", "def _score_for_model(meta):\n mean_acc = list()\n pipes = meta[\"pipeline\"]\n acc = meta[\"accuracy\"]\n if \"tagger\" in pipes:\n mean_acc.append(acc[\"tags_acc\"])\n if \"morphologizer\" in pipes:\n mean_acc.append((acc[\"morphs_acc\"] + acc[\"pos_acc\"]) / 2)\n if \"parser\" in pipes:\n mean_acc.append((acc[\"uas\"] + acc[\"las\"]) / 2)\n if \"ner\" in pipes:\n mean_acc.append((acc[\"ents_p\"] + acc[\"ents_r\"] + acc[\"ents_f\"]) / 3)\n if \"textcat\" in pipes:\n mean_acc.append(acc[\"textcat_score\"])\n if \"senter\" in pipes:\n mean_acc.append((acc[\"sent_p\"] + acc[\"sent_r\"] + acc[\"sent_f\"]) / 3)\n return sum(mean_acc) / len(mean_acc)", "def cross_validation(whole_train_data, whole_train_labels, k, k_fold):\n accuracies = []\n for i in range(k_fold):\n train_data, train_labels, validation_data, validation_labels = split_train_and_validation(whole_train_data, whole_train_labels, i, k_fold)\n accuracy = knn(train_data, train_labels, validation_data, validation_labels, k)\n accuracies.append(accuracy)\n avg_accuracy = np.mean(accuracies)\n return avg_accuracy", "def tenfold_cross_validation(X, y):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n for train_index, test_index in KFold(10).split(X):\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # change the parameters to see how each parameter affects the l1inear classifier\n linear_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\n # start training the classifier\n linear_classifier.fit(x_train, y_train)\n\n # create and plot the confusion matrix\n # cross validation done with cross_val_\n y_train_pred = cross_val_predict(linear_classifier, x_test, y_test, cv=10)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_test, y_train_pred)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_cross_validation_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def cross_validation_score(X_data, y_data, model, folds=5):\n\n # Shuffle index\n index = np.random.permutation(len(X_data))\n\n fold_size = int(len(X_data) / folds)\n scores = []\n for i in range(folds):\n \n # Partition Data\n X_train, X_val = partition_data(X_data[index], folds, i + 1, fold_size)\n y_train, y_val = partition_data(y_data[index], folds, i + 1, fold_size)\n\n # Train Model\n print(f\"Training on Fold: {i + 1}\")\n model.fit(X_train, y_train)\n\n # Predict Values on Validation Set\n val_pred = model.predict(X_val)\n\n # Get Accuracy\n score = accuracy_score(y_val, val_pred)\n scores.append(score)\n \n return sum(scores) / len(scores)", "def get_score(data, labels, fold_pairs, name, model, param, numTopVars,\r\n rank_per_fold=None, parallel=True, rand_iter=-1):\r\n assert isinstance(name, str)\r\n logging.info(\"Classifying %s\" % name)\r\n ksplit = len(fold_pairs)\r\n# if name not in NAMES:\r\n# raise ValueError(\"Classifier %s not supported. 
\"\r\n# \"Did you enter it properly?\" % name)\r\n\r\n # Redefine the parameters to be used for RBF SVM (dependent on\r\n # training data)\r\n if \"SGD\" in name:\r\n param[\"n_iter\"] = [25] # [np.ceil(10**3 / len(fold_pairs[0][0]))]\r\n classifier = get_classifier(name, model, param, rand_iter=rand_iter)\r\n \r\n if name == \"RBF SVM\": #This doesn't use labels, but looks as ALL data\r\n logging.info(\"RBF SVM requires some preprocessing.\"\r\n \"This may take a while\")\r\n #\r\n is_data_computed_gamma = True\r\n #\r\n if not is_data_computed_gamma:\r\n # Sahil commented the code below that computes the gamma choices from data.\r\n # The computed gamma choices seem too low thereby making SVM very slow. Instead, trying out fixed values.\r\n print param\r\n gamma = param['gamma']\r\n gamma = np.array(gamma)\r\n print 'gamma', gamma\r\n else:\r\n #Euclidean distances between samples\r\n # sahil switched from the first call to second one for computing the dist as the first one is giving error.\r\n # dist = pdist(StandardScaler().fit(data), \"euclidean\").ravel()\r\n dist = pdist(RobustScaler().fit_transform(data), \"euclidean\").ravel()\r\n print 'dist', dist\r\n #Estimates for sigma (10th, 50th and 90th percentile)\r\n sigest = np.asarray(np.percentile(dist, [10, 50, 90]))\r\n print 'sigest', sigest\r\n #Estimates for gamma (= -1/(2*sigma^2))\r\n gamma = 1./(2*sigest**2)\r\n print 'gamma', gamma\r\n #\r\n #\r\n #Set SVM parameters with these values\r\n # sahil changed the code a bit to remove a bug\r\n # param = [{\"kernel\": [\"rbf\"],\r\n # \"gamma\": gamma.tolist(),\r\n # \"C\": np.logspace(-2,2,5).tolist()}]\r\n param = {\"kernel\": [\"rbf\"],\r\n \"gamma\": gamma.tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()}\r\n # if name not in [\"Decision Tree\", \"Naive Bayes\"]:\r\n if param:\r\n if hasattr(classifier,'param_grid'): \r\n # isinstance(classifier, GridSearchCV):\r\n print 'param', param\r\n N_p = np.prod([len(l) for l in param.values()])\r\n elif isinstance(classifier, RandomizedSearchCV):\r\n N_p = classifier.n_iter\r\n else:\r\n N_p = 1\r\n# is_cv = isinstance(classifier, GridSearchCV) or \\\r\n# isinstance(classifier, RandomizedSearchCV)\r\n# print('Name: {}, ksplit: {}, N_p: {}'.format(name, ksplit, N_p))\r\n if (not parallel) or ksplit <= N_p or \\\r\n (name == \"Random Forest\") or (\"SGD\" in name):\r\n logging.info(\"Attempting to use grid search...\")\r\n classifier.n_jobs = PROCESSORS\r\n classifier.pre_dispatch = 1 # np.floor(PROCESSORS/24)\r\n allConfMats = []\r\n allTotalErrs = []\r\n allFittedClassifiers = []\r\n for i, fold_pair in enumerate(fold_pairs):\r\n confMats = []\r\n totalErrs = []\r\n fitted_classifiers = []\r\n logging.info(\"Classifying a %s the %d-th out of %d folds...\"\r\n % (name, i+1, len(fold_pairs)))\r\n if rank_per_fold is not None:\r\n rankedVars = rank_per_fold[i]\r\n else:\r\n rankedVars = np.arange(data.shape[1])\r\n #\r\n for numVars in numTopVars:\r\n logging.info('Classifying for top %i variables' % numVars)\r\n #\r\n # print 'rankedVars', rankedVars\r\n #\r\n confMat, totalErr, fitted_classifier = classify(data[:, rankedVars[:numVars]],\r\n labels,\r\n fold_pair,\r\n classifier)\r\n confMats.append(confMat)\r\n totalErrs.append(totalErr)\r\n fitted_classifiers.append(fitted_classifier)\r\n # recheck the structure of area and fScore variables\r\n allConfMats.append(confMats)\r\n allTotalErrs.append(totalErrs)\r\n allFittedClassifiers.append(fitted_classifiers)\r\n else:\r\n print 'parallel computing going on (debug Sahil ...) 
..........................'\r\n #\r\n classifier.n_jobs = PROCESSORS\r\n logging.info(\"Multiprocessing folds for classifier {}.\".format(name))\r\n pool = Pool(processes=min(ksplit, PROCESSORS))\r\n out_list = pool.map(per_split_classifier(data, labels, classifier,\r\n numTopVars),\r\n zip(rank_per_fold, fold_pairs))\r\n pool.close()\r\n pool.join()\r\n #allConfMats = [el[0] for el in out_list]\r\n #allTotalErrs = [el[1] for el in out_list]\r\n #allFittedClassifiers = [el[2] for el in out_list]\r\n allConfMats, allTotalErrs, allFittedClassifiers = tuple(zip(*out_list))\r\n return classifier, allConfMats, allTotalErrs, allFittedClassifiers", "def score_cv(data, dim, lag, number_of_splits=10, validation_fraction=0.5):\n # we temporarily suppress very short-lived progress bars\n with pyemma.util.contexts.settings(show_progress_bars=False):\n if type(data) == list:\n nval = int(len(data) * validation_fraction)\n elif data._is_reader == True:\n nval = data.number_of_trajectories()\n else:\n raise ValueError(\"data must be list of numpy arrays or pyemma reader object\")\n scores = np.zeros(number_of_splits)\n for n in range(number_of_splits):\n if type(data) == list:\n ival = np.random.choice(len(data), size=nval, replace=False)\n elif data._is_reader == True:\n ival = np.random.choice(data.number_of_trajectories(), size=nval, replace=False)\n vamp = coor.vamp(\n [d for i, d in enumerate(data) if i not in ival], lag=lag, dim=dim)\n scores[n] = vamp.score([d for i, d in enumerate(data) if i in ival])\n return scores", "def compute_average_precision_per_class(num_true_cases, gt_boxes,\n\t\t\t\t\t\t\t\t\t\tprediction_file, iou_threshold, use_2007_metric):\n\twith open(prediction_file) as f:\n\t\timage_ids = []\n\t\tboxes = []\n\t\tscores = []\n\t\tfor line in f:\n\t\t\tt = line.rstrip().split(\" \")\n\t\t\timage_ids.append(t[0])\n\t\t\tscores.append(float(t[1]))\n\t\t\tbox = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)\n\t\t\tbox -= 1.0 # convert to python format where indexes start from 0\n\t\t\tboxes.append(box)\n\t\tscores = np.array(scores)\n\t\tsorted_indexes = np.argsort(-scores)\n\t\tboxes = [boxes[i] for i in sorted_indexes]\n\t\timage_ids = [image_ids[i] for i in sorted_indexes]\n\t\ttrue_positive = np.zeros(len(image_ids))\n\t\tfalse_positive = np.zeros(len(image_ids))\n\t\tmatched = set()\n\t\tfor i, image_id in enumerate(image_ids):\n\t\t\tbox = boxes[i]\n\t\t\tif image_id not in gt_boxes:\n\t\t\t\tfalse_positive[i] = 1\n\t\t\t\tcontinue\n\n\t\t\tgt_box = gt_boxes[image_id]\n\t\t\tious = box_utils.iou_of(box, gt_box)\n\t\t\tmax_iou = torch.max(ious).item()\n\t\t\tmax_arg = torch.argmax(ious).item()\n\t\t\tif max_iou > iou_threshold:\n\t\t\t\tif (image_id, max_arg) not in matched:\n\t\t\t\t\ttrue_positive[i] = 1\n\t\t\t\t\tmatched.add((image_id, max_arg))\n\t\t\t\telse:\n\t\t\t\t\tfalse_positive[i] = 1\n\t\t\telse:\n\t\t\t\tfalse_positive[i] = 1\n\n\ttrue_positive = true_positive.cumsum()\n\tfalse_positive = false_positive.cumsum()\n\tprecision = true_positive / (true_positive + false_positive)\n\trecall = true_positive / num_true_cases\n\tif use_2007_metric:\n\t\treturn measurements.compute_voc2007_average_precision(precision, recall)\n\telse:\n\t\treturn measurements.compute_average_precision(precision, recall)", "def Val_score(n_neighbors):\n \n avg = []\n std = []\n \n for i in n_neighbors:\n \n # 10 fold cross validation for every value of n_neighbor \n score = cross_val_score(KNN(n_neighbors = i), X = train_x, y = train_y, cv = 10)\n \n # adding mean to average list \n 
avg.append(score.mean())\n \n # adding standard deviation to std list \n std.append(score.std())\n \n return avg, std", "def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results", "def k_fold_linear(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n\n test = temp.pop(i)\n train = pd.concat(temp)\n test_labels = list(test['Labels'])\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n test_predictions = [round(x, 1) for x in predict_linear_regression(test.drop(['Labels'], axis=1), model)]\n train_predictions = [round(x, 1) for x in predict_linear_regression(train.drop(['Labels'], axis=1), model)]\n\n Confusion_Matrix(test_predictions, test_labels)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)", "def performance_metrics(model, X_train, y_train, X_test, y_test, train=True, cv=True):\n from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score \n from sklearn.metrics import precision_score, recall_score, roc_auc_score\n from sklearn.model_selection import cross_validate, cross_val_score, StratifiedKFold\n scoring = {'acc': 'accuracy',\n 'prec_micro': 'precision_micro',\n 'rec_micro': 'recall_micro',\n 'f1_micro': 'f1_micro',\n 'auc':'roc_auc'} \n if train==True:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_train, y_train, scoring=scoring, cv=kfold)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n 
print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\") \n elif cv==False:\n scores = cross_validate(model, X_train, y_train, scoring=scoring)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif train==False:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_test, y_test, scoring=scoring, cv=kfold)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif cv==False:\n scores = cross_validate(model, X_test, y_test, scoring=scoring)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n 
dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def cross_validation_accuracy(clf, X, labels, k):\n abc = KFold(k, False)\n answer = []\n\n for train_idx, test_idx in abc.split(X):\n Xt, Xs = X[train_idx],X[test_idx]\n tr, ts=labels[train_idx],labels[test_idx]\n clf.fit(Xt,tr)\n final_ans = clf.predict(Xs)\n acc1 = accuracy_score(ts, final_ans)\n answer.append(acc1)\n\n return np.mean(np.array(answer))", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def cv_training(\n db: audformat.Database,\n partitioning: str,\n features: pd.DataFrame,\n normalization: str,\n root: str\n):\n\n df = db['covid'].df\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n folds = sorted(list(set([x.split('.')[-2] for x in db.tables if f'folds.{partitioning}' in x])))\n\n metrics = {\n 'F1': audmetric.unweighted_average_fscore,\n 'UAR': audmetric.unweighted_average_recall,\n 'ACC': audmetric.accuracy\n }\n\n if not os.path.exists(os.path.join(root, 'results.csv')):\n for fold in folds:\n\n def get_fold(db, fold_name):\n df = db[f'folds.{partitioning}.{fold}.{fold_name}'].df\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n return df\n df_train = get_fold(db, 'train')\n df_dev = get_fold(db, 'dev')\n df_test = get_fold(db, 'test')\n\n features = features.fillna(0)\n\n c_params = [\n .0001, \n .0005, \n .001, \n .005, 
\n .01, \n .05, \n .1, \n .5, \n 1\n ]\n\n steps = []\n if normalization == 'standard':\n # normalization performed on the fly for each fold\n steps.append(('scale', StandardScaler()))\n steps.append(('classify', SVC(kernel='rbf', probability=True)))\n\n max_f1 = 0\n best_c = None\n for c_param in audeer.progress_bar(\n c_params,\n total=len(c_params),\n desc='LOSO',\n disable=True\n ):\n \n clf = Pipeline(steps)\n clf.set_params(**{'classify__C': c_param})\n clf.fit(\n features.loc[df_train.index],\n df_train['covid'],\n )\n pred = clf.predict(features.loc[df_dev.index])\n f1_score = audmetric.unweighted_average_fscore(df_dev['covid'], pred)\n if f1_score > max_f1:\n max_f1 = f1_score\n best_c = c_param\n \n clf.set_params(**{'classify__C': best_c})\n clf.fit(\n features.loc[pd.concat((df_train, df_dev)).index],\n pd.concat((df_train, df_dev))['covid'],\n )\n joblib.dump(\n clf,\n os.path.join(root, f'clf.{fold}.pkl')\n )\n df.loc[df_test.index, 'predictions'] = clf.predict(features.loc[df_test.index])\n df.loc[df_test.index, 'probabilities'] = clf.predict_proba(features.loc[df_test.index])[:, 0]\n \n df.reset_index(inplace=True)\n df.to_csv(os.path.join(root, 'results.csv'), index=False)\n else:\n df = pd.read_csv(os.path.join(root, 'results.csv'))\n\n results = {\n key: metrics[key](df['covid'], df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'results.yaml'), 'w') as fp:\n yaml.dump(results, fp)\n\n file_df = df.groupby('file').apply(\n lambda x: pd.Series({\n 'covid': x['covid'].mode()[0],\n 'predictions': x['predictions'].mode()[0]\n })\n )\n\n results = {\n key: metrics[key](file_df['covid'], file_df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'speaker_results.yaml'), 'w') as fp:\n yaml.dump(results, fp)", "def kfold_scoring(self, data_frame, target, pipeline):\n\n fold_score = []\n macro = ['recall', 'f1', 'precision']\n number_of_folds = -1\n Folds = {}\n\n kf = KFold(n_splits=10, random_state=None, shuffle=True)\n\n for train_index, test_index in kf.split(data_frame):\n X_train = data_frame[train_index]\n X_test = data_frame[test_index]\n y_train = target[train_index]\n y_test = target[test_index]\n number_of_folds = number_of_folds + 1\n # Append the predicted labels.\n y_predict = self.fit_predict_model(X_train, y_train, X_test, pipeline)\n\n Folds[str(number_of_folds)] = {\n \"predicted\": y_predict,\n \"Actual\": y_test\n }\n\n if self.problem_type == 'regression':\n if self.scoring is not None:\n result = self.regression_scoring_function[self.scoring](y_predict, y_test)\n else:\n result = self.regression_scoring_function['r2_score'](y_predict, y_test)\n else:\n if self.scoring is not None:\n if self.scoring not in macro:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test)\n else:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test, average='macro')\n else:\n result = self.classification_scoring_function['f1'](\n y_predict, y_test, average='macro')\n\n fold_score.append(result)\n self.pipeline_dict['folds'] = Folds\n return np.mean(fold_score)", "def average_ndcg(self, r):\n scores = []\n score = []\n for rank_max in range(1, len(r[0]) + 1):\n score = []\n for data in r:\n score.append(self.ndcg_at_k(data[:rank_max], rank_max, method = 0))\n scores.append(reduce(lambda x, y: x + y, score) / len(score))\n return scores", "def cv(preds_path_stem, num_ensemble=1):\n fold_accs = []\n fold_c_matricies = []\n for fold in range(1, 6):\n data_val = load_dataset(\n 
f'Data/esc50_mel_wind_tfr/raw/fold_{fold}.tfrecords')\n pred_paths=[f'{preds_path_stem}preds_fold_{i}_{fold}.npy'\n for i in range(1, num_ensemble+1)]\n fold_acc, fold_c_matrix = test_wind_mel_model(pred_paths, data_val)\n fold_accs.append(fold_acc)\n fold_c_matricies.append(fold_c_matrix)\n cv_acc = np.mean(fold_accs)\n cv_acc_std = np.std(fold_accs)\n c_matrix = np.sum(fold_c_matricies, axis=0) / np.sum(fold_c_matricies)\n np.save(f'{preds_path_stem}cmatrix_{num_ensemble}.npy', c_matrix)\n print(f\"The cross validation accuracy is {cv_acc:.4f} \"\n f\"+/- 1.96 * {cv_acc_std:.4f}\")", "def get_scores(self, X_val):\n \n if not self.clf_fit:\n raise RuntimeError('Call clf.fit before clf.predict.')\n \n # Create predictions from learners\n preds = list()\n for i in range(self.num_base_learners):\n pred = self.clfs[i].predict(X_val)\n preds.append(pred)\n \n # Average results\n preds = np.vstack(preds)\n preds = preds.T\n \n scores = list()\n for pred in preds:\n scores.append(float(sum(pred))/float(preds.shape[1]))\n \n return scores", "def crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))", "def CV_fit(model, data, folds=5, random_state: int=None):\n kf = KFold(n_splits=folds, shuffle=False, random_state=random_state)\n kf = kf.split(X=data[0])\n\n # Fit k models and store them\n results = []\n for train_ids, test_ids in kf:\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n fold_model = train(model, train_ids, data, scaler)\n fold_result = test(model, fold_model, test_ids, data, scaler)\n\n results.append(fold_result)\n avg_result = 
np.mean(results, axis=0)\n return avg_result, results", "def k_fold_tree(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n test = temp.pop(i)\n\n test_labels = list(test['Labels'])\n\n train = pd.concat(temp)\n\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n\n test_predictions = predict_data(test, model)\n train_predictions = predict_data(train, model)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)", "def fit(self, X, y, train_index):\n if self.cv is None:\n kfold = KFold(n_splits=5, shuffle=True)\n elif isinstance(self.cv, (int, np.integer)):\n kfold = KFold(n_splits=self.cv, shuffle=True)\n elif isinstance(self.cv, KFold):\n kfold = self.cv\n else:\n raise ValueError(\n \"'cv' must be None, an integer or a KFold instance \"\n \"(got {0})\".format(self.cv)\n )\n\n self._train_index = train_index\n\n gamma_values = []\n C_values = []\n mae_val_values = []\n mean_mae_val_values = []\n\n y_train = y[train_index]\n for gamma in self.param_grid['gamma']:\n X_rbf = np.exp(-gamma * X)\n X_train = X_rbf[train_index[:, None], train_index]\n\n for C in self.param_grid['C']:\n self.estimator.set_params(C=C)\n mae_val_split = []\n for train_train_index, train_val_index in kfold.split(\n X_train, y_train\n ):\n X_train_train = X_train[train_train_index[:, None],\n train_train_index]\n X_train_val = X_train[train_val_index[:, None],\n train_train_index]\n y_train_train = y_train[train_train_index]\n y_train_val = y_train[train_val_index]\n\n self.estimator.fit(X_train_train, y_train_train)\n y_pred = self.estimator.predict(X_train_val)\n if self.age_range is not None:\n y_pred = np.clip(y_pred, *self.age_range)\n score = mean_absolute_error(y_train_val, y_pred)\n\n mae_val_split.append(score)\n\n gamma_values.append(gamma)\n C_values.append(C)\n mae_val_values.append(mae_val_split)\n mean_mae_val_values.append(np.mean(mae_val_split))\n\n idx = np.argmin(mean_mae_val_values)\n best_C = C_values[idx]\n best_gamma = gamma_values[idx]\n self.best_params_ = {'C': best_C, 'gamma': best_gamma}\n\n C_values = np.asarray(C_values).reshape(-1, 1)\n gamma_values = np.asarray(gamma_values).reshape(-1, 1)\n mae_val_values = np.asarray(mae_val_values).reshape(\n -1, kfold.get_n_splits())\n mean_mae_val_values = np.asarray(mean_mae_val_values).reshape(-1, 1)\n\n cv_results = np.c_[C_values,\n gamma_values,\n np.round(mae_val_values, 4),\n np.round(mean_mae_val_values, 4)]\n columns = ['C', 'gamma']\n columns += ['test_score_split{0}'.format(i)\n for i in range(mae_val_values.shape[1])]\n columns += ['mean_test_score']\n cv_results = pd.DataFrame(cv_results, columns=columns)\n self.cv_results_ = cv_results\n\n self._X_rbf = np.exp(- best_gamma * X)\n self._y = y\n self.best_estimator_ = self.estimator\n self.best_estimator_.set_params(C=best_C)\n self.best_estimator_.fit(self._X_rbf[train_index[:, None],\n train_index], y_train)", "def cvWorker(epochs, theta, folds, trainFunc, testFunc, report, queue):\n\n # track how many correct predictions are made over all folds with current hyperparams\n totalCorrect = 0;\n totalAttempts = 0;\n for (i,f) in enumerate(folds): \n \n testFold = f;\n trainFold = reduce(operator.add, folds[:i] + folds[i+1:]); # flatten training fold \n \n # learn weights \n w = trainFunc(trainFold, 
epochs, theta);\n \n # accumulate test accuracy\n [correct, attempts] = testFunc(w, testFold);\n totalCorrect += correct;\n totalAttempts += attempts; \n \n # update based on results and post to queue\n rate = totalCorrect / totalAttempts;\n if not (report == None):\n tabs = '\\t' * report;\n print tabs, 'Cross validation accuracy=', rate, 'for theta=', theta;\n results = (theta, rate); \n queue.put(results)\n \n return;", "def fit(self, X, y, train_index):\n if self.cv is None:\n kfold = KFold(n_splits=5, shuffle=True)\n elif isinstance(self.cv, (int, np.integer)):\n kfold = KFold(n_splits=self.cv, shuffle=True)\n elif isinstance(self.cv, KFold):\n kfold = self.cv\n else:\n raise ValueError(\n \"'cv' must be None, an integer or a KFold instance \"\n \"(got {0})\".format(self.cv)\n )\n\n self._train_index = train_index\n\n C_values = []\n mae_val_values = []\n mean_mae_val_values = []\n\n y_train = y[train_index]\n X_train = X[train_index[:, None], train_index]\n\n for C in self.param_grid['C']:\n self.estimator.set_params(C=C)\n mae_val_split = []\n for train_train_index, train_val_index in kfold.split(\n X_train, y_train\n ):\n X_train_train = X_train[train_train_index[:, None],\n train_train_index]\n X_train_val = X_train[train_val_index[:, None],\n train_train_index]\n y_train_train = y_train[train_train_index]\n y_train_val = y_train[train_val_index]\n\n self.estimator.fit(X_train_train, y_train_train)\n y_pred = self.estimator.predict(X_train_val)\n if self.age_range is not None:\n y_pred = np.clip(y_pred, *self.age_range)\n score = mean_absolute_error(y_train_val, y_pred)\n\n mae_val_split.append(score)\n\n C_values.append(C)\n mae_val_values.append(mae_val_split)\n mean_mae_val_values.append(np.mean(mae_val_split))\n\n idx = np.argmin(mean_mae_val_values)\n best_C = C_values[idx]\n self.best_params_ = {'C': best_C}\n\n C_values = np.asarray(C_values).reshape(-1, 1)\n mae_val_values = np.asarray(mae_val_values).reshape(\n -1, kfold.get_n_splits())\n mean_mae_val_values = np.asarray(mean_mae_val_values).reshape(-1, 1)\n\n cv_results = np.c_[C_values,\n np.round(mae_val_values, 4),\n np.round(mean_mae_val_values, 4)]\n columns = ['C']\n columns += ['test_score_split{0}'.format(i)\n for i in range(mae_val_values.shape[1])]\n columns += ['mean_test_score']\n cv_results = pd.DataFrame(cv_results, columns=columns)\n self.cv_results_ = cv_results\n\n self._X = X\n self._y = y\n self.best_estimator_ = self.estimator\n self.best_estimator_.set_params(C=best_C)\n self.best_estimator_.fit(self._X[train_index[:, None], train_index],\n y_train)", "def cross_validate(features, outputs, model): \n # do k-folds cross validation\n scores = cross_validation.cross_val_score(model,\n features,\n outputs)\n\n # get average accuracy\n return np.average(scores)", "def muc_scores(self):\n A_card, B_card = self.shape\n V_card = len(self)\n N = self.grand_total\n\n recall = _div(N - V_card, N - A_card)\n precision = _div(N - V_card, N - B_card)\n fscore = hmean(recall, precision)\n return precision, recall, fscore", "def kFoldCrossValidation(self, n_splits ):\n X = self.X\n y = self.y\n\n k_fold = KFold(n_splits)\n model = self.model\n\n for train, test in k_fold.split(X):\n model.fit(X[train], y[train])\n p = model.predict( X[test] )\n # Add line for scores\n\n return model #return scores here?", "def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": 
[0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def top10_accuracy_scorer(estimator, X, y):\n # predict the probabilities across all possible labels for rows in our training set\n probas = estimator.predict_proba(X)\n \n # get the indices for top 10 predictions for each row; these are the last ten in each row\n # Note: We use argpartition, which is O(n), vs argsort, which uses the quicksort algorithm \n # by default and is O(n^2) in the worst case. We can do this because we only need the top ten\n # partitioned, not in sorted order.\n # Documentation: https://numpy.org/doc/1.18/reference/generated/numpy.argpartition.html\n \n \n top10_idx = np.argpartition(probas, -10, axis=1)[:, -10:]\n \n # index into the classes list using the top ten indices to get the class names\n top10_preds = estimator.classes_[top10_idx]\n\n # check if y-true is in top 10 for each set of predictions\n mask = top10_preds == y.reshape((y.size, 1))\n \n # take the mean\n top_10_accuracy = mask.any(axis=1).mean()\n \n return top_10_accuracy", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def objective(params, n_folds=N_FOLDS):\n\n # Keep track of evals\n global ITERATION\n\n ITERATION += 1\n\n # Retrieve the subsample if present otherwise set to 1.0\n subsample = params['boosting_type'].get('subsample', 1.0)\n\n # Extract the boosting type\n params['boosting_type'] = 
params['boosting_type']['boosting_type']\n params['subsample'] = subsample\n\n # Make sure parameters that need to be integers are integers\n for parameter_name in ['max_depth', 'subsample_for_bin', 'min_child_samples','min_child_weight','num_parallel_tree']:\n params[parameter_name] = int(params[parameter_name])\n\n start = timer()\n\n print('params',params)\n # Perform n_folds cross validation\n cv_results = xgb.cv(params, train_set,\n num_boost_round=3000,\n nfold=n_folds,\n stratified=True,\n early_stopping_rounds=100,\n feval=tpr_weight_funtion_xgb_cv,\n seed=50,\n verbose_eval=True,\n\n )\n\n print('cv_results\\n',type(cv_results),'\\n',cv_results)\n\n run_time = timer() - start\n\n # Extract the best score\n best_score = np.min(cv_results['test-TPR-mean'])\n\n # Loss must be minimized\n loss = best_score\n\n TPR_std = cv_results[cv_results['test-TPR-mean']==best_score]['test-TPR-std'].values[0]\n print('TPR_stdv', TPR_std)\n\n\n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmin(cv_results['test-TPR-mean']) + 1)\n\n # Write to the csv file ('a' means append)\n of_connection = open(out_file, 'a')\n writer = csv.writer(of_connection)\n writer.writerow([loss,TPR_std, params, ITERATION, n_estimators, run_time])\n\n # Dictionary with information for evaluation\n return {'loss': loss,'TPR_std':TPR_std, 'params': params, 'iteration': ITERATION,\n 'estimators': n_estimators,\n 'train_time': run_time, 'status': STATUS_OK}", "def score_game(game_core):\n \n att_counter = [] \n np.random.seed(1) # fix RANDOM SEED so the experiment is reproducible \n random_array = np.random.randint(1,101, size=(1000))\n for number in random_array:\n att_counter.append(game_core(number))\n score = int(np.mean(att_counter))\n print(f\"Your algorithm guesses on average the number in {score} attempts.\")\n return(score)", "def run_CV(X,y,model,func, n_splits = 3, how = 'up', categorical = 'label_encoder'):\n logloss = []\n skf = StratifiedKFold(n_splits = n_splits, random_state = 144)\n for i, (train_idx, val_idx) in enumerate(skf.split(X,y)):\n X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n\n # # SMOTE\n # X_train = X_train.drop('poor', axis = 1) # drop target\n # cat_columns = X_train.select_dtypes(['object']).columns\n # X_train[cat_columns] = X_train[cat_columns].apply(LabelEncoder().fit_transform)\n # orig_cols = X_train.columns # SMOTE will return a numpy array. 
Store the column names here to recreate the dataframe for feature engineering/transforms below\n # X_train, y_train = SMOTE().fit_sample(X_train,y_train)\n # # recreate dataframe\n # X_train = pd.DataFrame(X_train, columns = orig_cols)\n\n if how is not None:\n # resample to balance data\n X_resampled = resample_data(X_train, how = how)\n # store the targets now that they are balanced\n y_train = X_resampled['poor']\n # drop target from train\n X_train = X_resampled.drop('poor', axis = 1)\n X_val.drop('poor', axis = 1, inplace = True)\n # print(X_val.columns.values)\n ####### feature engineering goes blow this comment:\n \n func(X_train)\n func(X_val)\n \n ###### end feature eng\n X_train = pre_process_data(X_train, normalize_num='standardize', categorical = categorical)\n assert X_train.shape[0] == y_train.shape[0]\n\n model.fit(X_train, y_train)\n # standardize X_val to predict\n X_val = pre_process_data(X_val,normalize_num= 'standardize', enforce_cols=X_train.columns, categorical = categorical)\n preds = model.predict_proba(X_val)\n \n logloss.append(log_loss(y_val, preds[:,1]))\n \n return logloss", "def predict_and_cv_score(self, x_data, y_data, custom_kfold=None):\n f1_scores = []\n recall_scores = []\n precision_scores = []\n accuracy_scores = []\n for model in self.list_of_models:\n f1_scores.append(cross_val_score(model, x_data, y_data, cv=custom_kfold, scoring='f1').mean())\n recall_scores.append(cross_val_score(model, x_data, y_data, cv=custom_kfold, scoring='recall').mean())\n precision_scores.append(cross_val_score(model, x_data, y_data, cv=custom_kfold, scoring='precision').mean())\n accuracy_scores.append(cross_val_score(model, x_data, y_data, cv=custom_kfold, scoring='accuracy').mean())\n return f1_scores, recall_scores, precision_scores, accuracy_scores", "def train_cv(X_train, Y_train, nfold = 5, early_stopping_rounds = 20):\n # model params\n params = { \"objective\" : \"multiclass\",\n \"num_class\" : 6,\n \"verbosity\" : -1 }\n\n # create dataset for lightgbm\n lgb_train = lgb.Dataset(X_train, Y_train)\n \n # cross validate to find optimal no of iterations\n r = lgb.cv(params, \n lgb_train, \n 10000,\n early_stopping_rounds = early_stopping_rounds,\n nfold = nfold,\n feval = accuracy_error,\n metrics = 'None',\n verbose_eval = True,\n seed = 42)\n\n # Highest score\n r_best = np.max(r['accuracy-mean'])\n\n # best number of estimators\n best_estimators = np.argmax(r['accuracy-mean']) + 1\n print(best_estimators)\n\n print(f'The maxium accuracy on the validation set was {r_best:.5f}')\n print(f'The ideal number of iterations was {best_estimators}.')\n\n # Fit on all of the training data using the ideal number of iterations\n model = lgb.LGBMClassifier(n_estimators=best_estimators, n_jobs = -1,\n **params, random_state = 42) \n model.fit(X_train, Y_train)\n\n return model", "def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)", "def generate_cross_val_score(clf, data, target, cv):\n return cross_val_score(clf, data, target, cv=cv)", "def cv_train(dataset, labels, cv=10):\n reg = linear_model.BayesianRidge()\n mae_list = -cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='neg_mean_absolute_error')\n rmse_list = np.sqrt(-cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='neg_mean_squared_error'))\n pc_list = cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='r2')\n\n print(mae_list)\n print(rmse_list)\n print(pc_list)\n\n 
print('=========The Mean Absolute Error of Model is {0}========='.format(np.mean(mae_list)))\n print('=========The Root Mean Square Error of Model is {0}========='.format(np.mean(rmse_list)))\n print('=========The Pearson Correlation of Model is {0}========='.format(np.mean(pc_list)))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, \"./model/BayesRidge_SCUT-FBP.pkl\")\n print('The regression model has been persisted...')", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def do_crossval():\n df = read_df()\n # X = df['review'].apply(remove_html_lower)\n\n X = df['review']\n y = df['sentiment']\n X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y, random_state=222 )\n\n tfidf = TfidfVectorizer(stop_words='english', min_df=2, max_df=0.8, ngram_range=(1,4))\n stem_pipeline = make_pipeline(TextNormalizer(), tfidf, LogisticRegression(C=100))\n cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2)\n\n scores = cross_val_score(stem_pipeline, X_train, y_train, cv=cv, scoring='accuracy', n_jobs=-1)\n print(scores, scores.mean())", "def csls(scores, knn = 5):\n def mean_similarity(scores, knn, axis = 1):\n nghbs = np.argpartition(scores, -knn, axis = axis) # for rows #[-k:] # argpartition returns top k not in order but it's efficient (doesnt sort all rows)\n # TODO: There must be a faster way to do this slicing\n if axis == 1:\n nghbs = nghbs[:,-knn:]\n nghbs_score = np.concatenate([row[indices] for row, indices in zip(scores, nghbs)]).reshape(nghbs.shape)\n else:\n nghbs = nghbs[-knn:,:].T\n nghbs_score = np.concatenate([col[indices] for col, indices in zip(scores.T, nghbs)]).reshape(nghbs.shape)\n\n return nghbs_score.mean(axis = 1)\n # 1. Compute mean similarity return_scores\n src_ms = mean_similarity(scores, knn, axis = 1)\n trg_ms = mean_similarity(scores, knn, axis = 0)\n # 2. 
Compute updated scores\n normalized_scores = ((2*scores - trg_ms).T - src_ms).T\n return normalized_scores", "def calculate_avg_score(state_score,state_count):\n\tfor state in state_score.keys():\n\t\tstate_score[state] = 1.*state_score[state]/state_count[state]\n\treturn state_score", "def score(model):\n # get the first layer\n layer = model.get_layer('encoder')\n # extracts weights\n weights = layer.get_weights()[0]\n # calculate the infinity norm as shown in the paper.\n # For each input feature get the absolute maximum weight\n # connected with this feature\n scores = np.linalg.norm(weights, ord=np.inf, axis=1)\n # the final score is a importance measure for each feature\n sorted_scores = sorted(range(len(scores)), key=lambda k: scores[k])\n return sorted_scores[::-1]", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def Naive_Byse_k_folds(train_p):\n all_ex, att = make_examples(copy.deepcopy(train_p))\n F2I = parseAttributes(train_p[0])\n k=5\n accuracy = 0\n data = dev_train_sep(k,data=all_ex)\n for i in range(k):\n dev = data[i]\n train =[]\n for j in range(k):\n if not j == i:\n train += data[j]\n naive_bayes = NaiveBayes(train, dev, attributes=att, F2I=F2I)\n acc= naive_bayes.naiveBayes()\n accuracy +=acc\n avg_acu = \"{0:.2f}\".format(accuracy / k)\n print(\"Naive Byse : \" + str(avg_acu))\n return avg_acu", "def timeseriesCVscore(self, params):\n errors = []\n\n # values = series.values\n values = self.train_ts\n self.alpha, self.beta, self.gamma = params\n\n # set the number of folds for cross-validation\n tscv = TimeSeriesSplit(n_splits=3)\n\n # iterating over folds, train model on each, forecast and calculate error\n for train, test in tscv.split(values):\n\n self.train = values[train]\n self.test = values[test]\n self.triple_exponential_smoothing()\n predictions = self.result[-len(self.test) :]\n actual = values[test]\n error = mape(list(actual), predictions)\n errors.append(error)\n\n # print \"error: \"\n # print errors\n return np.mean(np.array(errors))", "def score_models(models, X,y):\n print(\"评价每个模型.\")\n for name,model in models.items():\n score = cross_val_score(model,X,y,scoring='roc_auc',cv=5)\n mean_score=np.mean(score)\n print(\"{}: {}\" .format(name, mean_score))\n print(\"Done.\\n\")", "def evaluate_ucf50():\n fv_features = 'fv_ucf50_python/'\n accs = []\n groups, full, sets = utility.split_data(fv_features, suffix='_fv.npy.gz',\n useLooCV=False)\n for i in 
xrange(5):\n ts = time.time()\n features_train, features_test, labels_train, labels_test = \\\n utility.load_groups(\n groups,np.setdiff1d(full,sets[i]),\n sets[i], scale=False, verbose=False)\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=6)\n clf.fit(features_train, labels_train)\n acc = clf.score(features_test, labels_test)\n print \"Fold %d accuracy: %.3f\" % (i, acc)\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs.append(acc)\n\n with open('fv_ucf50_accs_5fold.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))", "def scoring(self):\n pass", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def kaggle_metric(predictions, exact_values):\n norm = 70.*len(predictions)\n \n score = 0.\n for p,e in zip(predictions,exact_values):\n score += N.sum((heaviside(p)-heaviside(e))**2)\n\n return score/norm", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def average_precision(ranking, references, atk=None):\n total, num_correct = 0.0, 0.0\n for k, prediction in enumerate(ranking[:atk], 1):\n if prediction in references:\n num_correct += 1\n total += num_correct / k\n return total / num_correct if total > 0 else 0.0", "def cross_validate(cv, x, y, k=1):\n indices = np.arange(len(x))\n np.random.shuffle(indices)\n stepsize = int(len(x) / cv)\n metrics = np.zeros(4)\n for i in range(cv):\n logging.info(f\"Cross-validation fold {i+1}\")\n\n # Slice test set out of data\n test_indices = indices[i*stepsize:i*stepsize+stepsize]\n x_test = x[test_indices]\n y_test = y[test_indices]\n\n # Everything else is the training set\n x_train = np.copy(x)\n x_train = np.delete(x_train, test_indices, axis=0)\n y_train = np.copy(y)\n y_train = np.delete(y_train, test_indices, axis=0)\n\n metrics += evaluate(knn(x_test, x_train, y_train, k), y_test)\n metrics /= cv\n\n print(metrics)\n return metrics", "def inference(train_X, test_X, y, model_params=None, folds=10):\n\n y_oof = np.zeros(train_X.shape[0]) # oof pred\n test_preds = np.zeros(test_X.shape[0]) # kfold pred\n score = 0 # average of kfold(AUC score)\n \n # -- Stratified 
KFold\n skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=444)\n for fold, (train_idx, valid_idx) in enumerate(skf.split(train_X, y)):\n X_train, X_valid = train_X.loc[train_idx, :], train_X.loc[valid_idx, :]\n y_train, y_valid = y[train_idx], y[valid_idx]\n \n X_train = X_train.to_numpy()\n X_valid = X_valid.to_numpy()\n y_train = y_train.to_numpy()\n y_valid = y_valid.to_numpy()\n\n X_test = test_X.to_numpy()\n\n print(f'fold: {fold+1}, X_train.shape: {X_train.shape}, X_valid.shape: {X_valid.shape}')\n\n # -- Catboost, train\n clf = CatBoostClassifier(**model_params) \n clf.fit(\n X_train, y_train, \n eval_set=(X_valid, y_valid),\n early_stopping_rounds=50, \n verbose=20\n )\n\n # -- Prediction/Validation/Scoring\n valid_preds = clf.predict_proba(X_valid)[:, 1]\n y_oof[valid_idx] = valid_preds\n\n print(f\"Fold {fold + 1} | AUC: {roc_auc_score(y_valid, valid_preds)}\")\n print('-'*80)\n\n score += roc_auc_score(y_valid, valid_preds) / folds\n test_preds += clf.predict_proba(X_test)[:, 1] / folds\n \n del X_train, X_valid, y_train, y_valid\n gc.collect()\n \n print(f\"\\nMean AUC = {score}\") # validation score\n print(f\"OOF AUC = {roc_auc_score(y, y_oof)}\") # oof validation\n \n return y_oof, test_preds", "def CV_fit(model, data, datasets, folds=5, random_state: int=None):\n kf = KFold(n_splits=folds, shuffle=False, random_state=random_state)\n kf = kf.split(X=data[0])\n\n # Fit k models and store them\n results = []\n for train_ids, test_ids in kf:\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n fold_model = train(model, train_ids, data, scaler, datasets)\n fold_result = test(model, fold_model, test_ids, data, scaler)\n\n results.append(fold_result)\n avg_result = np.mean(results, axis=0)\n return avg_result, results", "def calc_mean_score(movies: List[Movie]) -> float:\n return round(sum([m.score for m in movies]) / len(movies), 1)", "def outer_validation(self, cvs=6, nested_cv=False, **kwargs):\n skf = StratifiedKFold(n_splits=cvs, **kwargs)\n split_pool = skf.split(self.x_matrix, self.y_vector)\n\n model = RandomizedSearchCV(self.pipeline, **self.optim_params)\n if nested_cv:\n self.estimator = model\n else:\n model.fit(self.x_matrix, self.y_vector)\n self.estimator = model.best_estimator_\n\n # XXX: possible wrong logics\n if self.training_report_pool is None:\n self.training_report_pool = [\n dict(\n Scorer=model.scorer_,\n Params=model.get_params(),\n Best_params=model.best_params_,\n Best_score=model.best_score_,\n Best_index=model.best_index_,\n Cross_validations=model.cv_results_,\n Best_estimator=model.best_estimator_,\n Estimator_score=None\n )\n ]\n\n if self.model_pool is None:\n self.model_pool = []\n\n if self.area_under_curve_pool is None:\n self.area_under_curve_pool = []\n\n if self.training_report_pool is None:\n self.training_report_pool = []\n\n if self.feature_importance_pool is None:\n self.feature_importance_pool = {\n name: [0] * cvs for name in self.x_matrix.columns\n }\n\n for cv_idx, split in enumerate(split_pool):\n estimator = copy.deepcopy(self.estimator)\n training_report, auc, feature_importance, model \\\n = self.randomized_search_cv(estimator, split)\n\n self.model_pool.append(model)\n self.area_under_curve_pool.append(auc)\n\n if training_report:\n self.training_report_pool.append(training_report)\n\n for name, importance in feature_importance.items():\n 
self.feature_importance_pool[name][cv_idx] = importance", "def calculate_average_precision_score(model, evaluation_data, train_data, opt_data):\n return __calculate_classification_metrics(model, \"avg-precision\", evaluation_data, train_data, opt_data)", "def randomforest_cv(self, nsplits: int = 5) -> (float, float, float):\r\n params = {\r\n \"n_estimators\": [20, 50, 100, 200],\r\n \"max_depth\": [2, 3, 5, 8, 10, 15, 20],\r\n }\r\n model = RandomForestClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(self.x, self.y)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = RandomForestClassifier(**best_params).fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def evaluate_num_centres(\n inputs, targets, folds, scale, reg_param, num_centres_sequence=None):\n # fix the reg_param\n reg_param = 0.01\n # fix the scale\n scale = 100\n # choose a range of numbers of centres\n if num_centres_sequence is None:\n num_centres_sequence = np.arange(5,200)\n num_values = num_centres_sequence.size\n print(num_values)\n num_folds = len(folds)\n #\n # create some arrays to store results\n train_mean_errors = np.zeros(num_values)\n test_mean_errors = np.zeros(num_values)\n train_stdev_errors = np.zeros(num_values)\n test_stdev_errors = np.zeros(num_values)\n # \n # run the experiments\n for c, num_centres in enumerate(num_centres_sequence):\n centres = np.linspace(0,1,num_centres)\n feature_mapping = construct_rbf_feature_mapping(centres,scale)\n designmtx = feature_mapping(inputs)\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n train_mean_error = np.mean(train_errors)\n test_mean_error = np.mean(test_errors)\n train_stdev_error = np.std(train_errors)\n test_stdev_error = np.std(test_errors)\n # store the results\n train_mean_errors[c] = train_mean_error\n test_mean_errors[c] = test_mean_error\n train_stdev_errors[c] = train_stdev_error\n test_stdev_errors[c] = test_stdev_error\n #\n # Now plot the results\n fig, ax = plot_train_test_errors(\n \"Num. Centres\", num_centres_sequence, train_mean_errors, test_mean_errors)\n # Here we plot the error ranges too: mean plus/minus 1 standard error.\n # 1 standard error is the standard deviation divided by sqrt(n) where\n # n is the number of samples. 
\n # (There are other choices for error bars.)\n # train error bars\n lower = train_mean_errors - train_stdev_errors/np.sqrt(num_folds)\n upper = train_mean_errors + train_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='b')\n # test error bars\n lower = test_mean_errors - test_stdev_errors/np.sqrt(num_folds)\n upper = test_mean_errors + test_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='r')\n\n return train_mean_errors", "def compute_score(self, observation, prediction, verbose=False):\n #print(observation)\n score = TScore.compute( self.observation, prediction )\n print(\"compute_score\")", "def cross_validate(pipeline, data, cv=4):\n print \"Running cross validation...\"\n (Xcv, ycv) = data\n kfold = KFold(n_splits=cv, shuffle=True, random_state=42)\n results = []\n for train_idx, val_idx in kfold.split(Xtrain):\n pipeline.fit(Xcv[train_idx], ycv[train_idx])\n results.append(accuracy_score(\n ycv[val_idx], pipeline.predict(Xcv[val_idx])\n ))\n print \"{} +/- {}\".format(np.mean(results), np.std(results))", "def objective(params, n_folds = N_FOLDS):\n # Perform n_fold cross validation with hyperparameters\n # Use early stopping and evalute based on ROC AUC\n cv_results = lgb.cv(params, lgb_train, nfold = n_folds, num_boost_round = 10000, early_stopping_rounds = 100, metrics = 'auc', seed = 50)\n # Extract the best score\n best_score = max(cv_results['auc-mean'])\n # Loss must be minimized\n loss = 1 - best_score\n # Dictionary with information for evaluation\n return {'loss': loss, 'params': params, 'status': STATUS_OK}", "def nested_cv(X, y, model, n_splits, n_folds, unique_id):\n \n cv = StratifiedKFold(n_splits=n_splits,\n shuffle=True,\n random_state=42) # Outer CV\n \n i_start = 0\n i_list = []\n results_df = None\n cv_path = unique_id + '_NestedCV.pkl'\n \n if os.path.isfile(cv_path) == True: # If CV is incomplete, resume\n results_df = pd.read_pickle(cv_path)\n i_start = results_df.Outer_fold.max() + 1\n print('Resuming cross-validation from fold ' + str(i_start + 1))\n \n # Generate indices to split data by StratifiedKFold\n # Append indices for each fold to list \n for tr_i, te_i in cv.split(X,y):\n i_list.append([tr_i, te_i])\n \n # For each fold...\n for i in range(i_start, len(i_list)):\n results_list = []\n print('Beginning fold ' + str(i+1) + ' of ' + str(len(i_list)))\n \n # Split data into training and test tests\n X_train = X.loc[X.index.intersection(i_list[i][0])]\n y_train = y.loc[y.index.intersection(i_list[i][0])]\n X_test = X.loc[X.index.intersection(i_list[i][1])]\n y_test = y.loc[y.index.intersection(i_list[i][1])]\n\n start = time.time()\n \n # Fit the HyperoptEstimator to training data (optimise model)\n model.fit(X_train,\n y_train,\n n_folds=n_folds, # Inner stratified k-fold CV\n cv_shuffle=True)\n \n end = time.time()\n duration = end - start\n\n # Use optimised model to predict labels for test data\n y_pred = model.predict(X_test)\n score = f1_score(y_test, y_pred, average='weighted') # Evaluate\n \n # Everything below: formats and/or calculates results for output file\n sorted_labels = np.sort(y_test.unique())\n unweighted_score = f1_score(y_test, y_pred,\n average=None,\n labels=sorted_labels)\n c_matrix = confusion_matrix(y_test, y_pred,\n labels=sorted_labels)\n\n for trial in range(len(model.trials.trials)):\n if model.trials.trials[trial].get('result').get('status') == 'ok':\n trial_loss = 
model.trials.trials[trial].get('result').get('loss')\n trial_duration = model.trials.trials[trial].get('result').get('duration')\n else:\n trial_loss = np.nan\n trial_duration = np.nan\n \n results_list.append([i,\n score,\n unweighted_score,\n le.inverse_transform(sorted_labels),\n c_matrix,\n duration,\n trial,\n trial_loss,\n trial_duration])\n \n append_df = pd.DataFrame(results_list,\n columns=['Outer_fold',\n 'Outer_score',\n 'Outer_unweighted_scores',\n 'Outer_unweighted_score_labels',\n 'Outer_confusion_matrix',\n 'Outer_training_duration',\n 'Trial',\n 'Trial_loss',\n 'Trial_duration'])\n if i == i_start:\n if results_df is not None:\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n else:\n final_df = append_df\n final_df.to_pickle(cv_path)\n \n else:\n results_df = pd.read_pickle(cv_path)\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n final_df.to_pickle(cv_path)", "def ex_3_a(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train multi-class SVMs with one-versus-rest strategy with\n ## - linear kernel\n ## - rbf kernel with gamma going from 10**-5 to 10**5\n ## - plot the scores with varying gamma using the function plot_score_versus_gamma\n ## - Note that the chance level is not .5 anymore and add the score obtained with the linear kernel as optional argument of this function (parameter baseline)\n ###########\n\n gamma_range = [10**-5, 10**-4, 10**-3, 10**-2, 10**-1, 10**0, 10**1, 10**2, 10**3, 10**4, 10**5]\n\n lin = svm.SVC(decision_function_shape='ovr', kernel='linear', C=10)\n lin.fit(x_train, y_train)\n\n score_train = lin.score(x_train, y_train)\n score_test = lin.score(x_test, y_test)\n\n gam_score_train = []\n gam_score_test = []\n for gamma_value in gamma_range:\n gam = svm.SVC(decision_function_shape='ovr', kernel='rbf', gamma=gamma_value, C=10)\n gam.fit(x_train, y_train)\n\n gam_score_train.append(gam.score(x_train, y_train))\n gam_score_test.append(gam.score(x_test, y_test))\n\n plot_score_vs_gamma(gam_score_train, gam_score_test, gamma_range, score_train, score_test, baseline=0.2)", "def best_threshold_from_folds(y_tuples, scoring=f1_score, step_size=0.01, maximize=True):\n thresholds, scores = [], []\n for _, y_true, y_pred in y_tuples:\n t, s = find_best_threshold(y_true, y_pred, step_size, scoring, maximize=maximize)\n thresholds.append(t)\n scores.append(s)\n\n mean_threshold = np.mean(thresholds)\n mean_score = np.mean([score_for_threshold(y, y_hat, scoring, mean_threshold) for _, y, y_hat in y_tuples])\n return mean_threshold, mean_score", "def run_cv_pred(X, y, clf, n_folds):\n # Construct a kfolds object\n skf = StratifiedKFold(n_splits=n_folds)\n splits = skf.split(X, y)\n y_pred = y.copy()\n\n # Iterate through folds\n for idx, (train_index, test_index) in enumerate(splits):\n X_train, X_test = X[train_index], X[test_index]\n y_train = y[train_index]\n # Initialize a classifier with key word arguments\n clf.fit(X_train, y_train)\n try: # Gradient boosted trees do not accept sparse matrices in the predict function currently\n preds = clf.predict(X_test)\n except TypeError:\n preds = clf.predict(X_test.todense())\n y_pred[test_index] = preds\n\n return y_pred", "def print_score_from_restored_model(clf, X_test, y_test):\n y_predicted = clf.predict(X_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = 
fold_score / max_fold_score\n\n print(\"FNC-1 score from restored model: \" + str(score) +\"\\n\")\n\n return score", "def get_avg_f1(ccs):\n scorers = ccs.scorers\n f1_scores = []\n for scorer in scorers:\n f1_scores.append(scorer.get_f1())\n\n avg_f1 = 100 * np.average(f1_scores)\n\n return avg_f1", "def experiment(improved):\n\n N_list = [5 ,10 ,20]\n K_list = [3 , 7 ,9]\n P_list = [0.3 , 0.4 , 0.5 ,0.6 ,0.7]\n\n data = pandas.read_csv('train.csv')\n\n avg_list = []\n for i in range(0,len(N_list) * len(K_list) * len(P_list)):\n avg_list.append([0 , None])\n\n kf = KFold(n_splits=5, shuffle=True, random_state=209418441)\n rotation_index = 1\n for train_index, test_index in kf.split(data):\n\n train = data.iloc[train_index]\n test = data.iloc[test_index]\n index = 0\n for n in N_list:\n for k in K_list:\n for p in P_list:\n\n #print('testing for N= ',n,', K = ',k, 'P = ',p)\n KNN = forest.KNN_forest(N=n, K=k, P=p, data = train , improved=improved)\n success_rate = utls.tests.succ_rate_test.test(test,KNN.Classify)\n avg_list[index][0] += success_rate\n avg_list[index][1] = (n,k,p)\n #print(' rate is: ',avg_list[index][0]/rotation_index)\n index += 1\n rotation_index +=1\n\n\n\n best_option = max(avg_list,key= lambda x:x[0])\n #print(' ****** DONE ******')\n #print('best n,k,p are : ' , best_option[1] , ' with success rate: ' , best_option[0])\n\n return best_option[1]", "def _eed_compute(sentence_level_scores: List[Tensor]) ->Tensor:\n if len(sentence_level_scores) == 0:\n return tensor(0.0)\n average = sum(sentence_level_scores) / tensor(len(sentence_level_scores))\n return average", "def update_cross_validate_scores(cv_scores):\n # TODO: move this func to cvrun.py (rename cvrun.py utils_cv.py)\n cv_folds = len(list(cv_scores.values())[0])\n\n df = cv_scores_to_df(cv_scores, decimals=3, calc_stats=False)\n\n # Add `metric` col\n v = list(map(lambda x: '_'.join(x.split('_')[1:]), df.index))\n df.insert(loc=0, column='metric', value=v)\n\n # Convert `neg` metric to positive and update metric names (drop `neg_`)\n # scikit-learn.org/stable/modules/model_evaluation.html --> explains the `neg` in `neg_mean_absolute_error`\n idx_bool = [True if 'neg_' in s else False for s in df['metric']]\n for i, bl in enumerate(idx_bool):\n if bl:\n df.iloc[i, -cv_folds:] = abs(df.iloc[i, -cv_folds:])\n df['metric'] = df['metric'].map(lambda s: s.split('neg_')[-1] if 'neg_' in s else s)\n\n # Add `tr_set` col\n v = list(map(lambda x: True if 'train' in x else False, df.index))\n df.insert(loc=1, column='tr_set', value=v)\n return df", "def Bayes_prediction(X, y, fold_number=10):\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n cross_tab_all = []\n lamb_hat_all = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n pi_hat = y_train.mean()\n lamb_hat = np.zeros((2, D))\n \n for flag in range(2):\n for d in range(D):\n lamb_hat[flag][d] = (sum(X_train.iloc[i][d] * (y_train.iloc[i]==flag) for i in range(length))) / (sum(y_train.iloc[i]==flag for i in range(length)))\n\n y_pred = np.zeros(len(X_test))\n for i in range(len(X_test)):\n y_pred[i] = Bayes_classifier(pi_hat, X_test.iloc[i], lamb_hat)\n \n cross_tab = np.zeros((2, 2))\n for m in [0, 1]:\n for n in [0, 1]:\n cross_tab[m][n] = sum([(y_test.values[i]==m) & (y_pred[i]==n) for i in range(len(y_pred))]) \n \n cross_tab_all.append(cross_tab)\n lamb_hat_all.append(lamb_hat)\n \n cross_tab_all = 
sum(cross_tab_all)\n lamb_hat_all\n\n return lamb_hat_all, cross_tab_all" ]
[ "0.7508548", "0.68642884", "0.686009", "0.6607522", "0.65247214", "0.65219957", "0.65063673", "0.6505322", "0.65024954", "0.6391796", "0.6380426", "0.63793", "0.6310294", "0.6234901", "0.6214619", "0.61726385", "0.6169891", "0.6133121", "0.6118932", "0.6067403", "0.6057782", "0.5987329", "0.5977016", "0.5973731", "0.59702563", "0.59595907", "0.5952543", "0.5947681", "0.594438", "0.593201", "0.59237987", "0.5921839", "0.5909228", "0.58880204", "0.5849378", "0.58407456", "0.5832072", "0.5826689", "0.5808526", "0.58063203", "0.57975954", "0.57972103", "0.5786251", "0.57778186", "0.5760345", "0.575347", "0.57487845", "0.5748383", "0.5746837", "0.5746688", "0.5735533", "0.5734606", "0.573364", "0.5729928", "0.5728364", "0.5722288", "0.5714543", "0.5710954", "0.5709983", "0.57021886", "0.5701597", "0.5699505", "0.56924075", "0.56907177", "0.56815743", "0.5666982", "0.5663051", "0.56626815", "0.566029", "0.5658517", "0.5651485", "0.5647528", "0.564686", "0.5646218", "0.56395215", "0.56289566", "0.5622856", "0.56169915", "0.5615783", "0.561531", "0.56063884", "0.56008226", "0.5594512", "0.55944777", "0.55935454", "0.5592996", "0.5581973", "0.557891", "0.5578059", "0.5572226", "0.55717313", "0.5562593", "0.55603033", "0.555828", "0.5555305", "0.5553261", "0.5553209", "0.55531895", "0.55531174" ]
0.63403803
12
Fit the decision tree model.
def _fit(self, data):
    train_in, train_labels = self._split_inputs_outputs(data)
    clf = DecisionTreeClassifier(min_samples_leaf=0.05)
    clf.fit(train_in, train_labels)
    return clf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def fit(self, X, y):\n #Initialize the tree with the given data\n tree = kd(X)\n self.tree = tree\n self.labels = y", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree", "def fit(self, X, y):\n assert len(y.shape) == 2 and len(y) == len(X), 'Wrong y shape'\n self.criterion, self.classification = self.all_criterions[self.criterion_name]\n if self.classification:\n if self.n_classes is None:\n self.n_classes = len(np.unique(y))\n y = one_hot_encode(self.n_classes, y)\n\n self.root = self.make_tree(X, y, 0)", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = 
tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def fit(self, X, y, sample_weight=None, check_input=True,\n X_idx_sorted=None):\n\n super(DecisionTreeClassifier, self).fit(\n X, y,\n sample_weight=sample_weight,\n check_input=check_input,\n X_idx_sorted=X_idx_sorted)\n return self", "def fit(self, x, y):\n\n # Make sure that x and y have the same number of instances\n assert x.shape[0] == len(y), \\\n \"Training failed. x and y must have the same number of instances.\"\n\n self.root = Node(x, y, depth=0)\n self.induce_decision_tree(self.root)\n\n self.is_trained = True", "def fit(self, X, y):\n features = 2 \n forest = [self.Tree(features) for i in range(self.n_estimators)] \n estimators = []\n \n for tree in forest:\n# mylist = list(range(len(X.columns)))\n# sample_index = np.random.choice(mylist, size=features , replace=True, p=None)\n# X_data = None \n# for j in range(len(sample_index)):\n# X_data = pd.concat([X_data, X[:, i]] , axis=1, ignore_index=True).reset_index() \n estimator = tree\n estimator.fit(X, y)\n estimators.append(estimator)\n self.estimators = estimators\n return", "def fit(self, X, y):\n features = 2 \n forest = [self.Tree(features) for i in range(self.n_estimators)] \n estimators = []\n \n for tree in forest:\n# mylist = list(range(len(X.columns)))\n# sample_index = np.random.choice(mylist, size=features , replace=True, p=None)\n# X_data = None \n# for j in range(len(sample_index)):\n# X_data = pd.concat([X_data, X[:, i]] , axis=1, ignore_index=True).reset_index() \n estimator = tree\n estimator.fit(X, y)\n estimators.append(estimator)\n self.estimators = estimators\n return", "def trainDecisionTree(inputDf, outputDf):\n clf = DecisionTreeRegressor(random_state=0)\n clf.fit(inputDf, outputDf)\n return clf", "def fit_tree_stump(X_train: np.ndarray, y_train: np.ndarray) -> tree.DecisionTreeClassifier:\n clf = tree.DecisionTreeClassifier()\n clf = clf.fit(X_train, y_train)\n return clf", "def fit(self, X, y):\n self.X = X.copy()\n self.y = y.copy()\n attributes = list(self.X.columns)\n\n # Adding an extra column in X for the output variable\n X.loc[:,\"Output\"] = self.y\n\n if (X.dtypes[0].name==\"category\"):\n if (y.dtype.name==\"category\"):\n # Discrete Input Discrete Output\n self.type = 0\n else:\n # Discrete Input Real Output\n self.criterion = \"information_gain\"\n self.type = 1\n \n # \"root\" is the name of the dummy root created for the tree\n self.Graph[\"root\"] = self.di(X,attributes,0)\n else:\n if (y.dtype.name==\"category\"):\n # Real Input Discrete Output\n self.type = 2\n else:\n # Real Input Real Output\n self.criterion = \"information_gain\"\n self.type = 3\n\n # \"root\" is the name of the dummy root created for the tree\n self.Graph[\"root\"] = self.ri(X,attributes,0)\n \n X.drop([\"Output\"],axis = 1,inplace = True)", "def fit(self, X):", "def fit(self, X_train, y_train):\n for i in range(self.N):\n h = RandomDecisionTree(candidate_splits=self.candidate_splits, depth=self.max_depth)\n h = h.fit(*self.bootstrap(X_train, 
y_train))\n self.learners.append(h)", "def fit(self, features, classes):\n\n self.root = self.__build_tree__(features, classes)", "def fit():\n pass", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def fit_model(X, y):\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n max_depth_values = [i for i in range(1,11)]\n params = {'max_depth':max_depth_values}\n\n #TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor,params,scoring = scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def fit(self, x, y, minimum_gain=0):\n\n # Make sure that x and y have the same number of instances\n assert x.shape[0] == len(y), \\\n \"Training failed. x and y must have the same number of instances.\"\n\n #######################################################################\n # ** TASK 2.1: COMPLETE THIS METHOD **\n #######################################################################\n (classes, y_int) = np.unique(y, return_inverse=True)\n y_int = np.reshape(y_int, (-1, 1)) # we have to reshape y from 1d to 2d\n dataset = np.concatenate((x, y_int), axis=1)\n\n self.decision_tree = construct_tree(dataset, minimum_gain)\n\n # set a flag so that we know that the classifier has been trained\n self.is_trained = True\n\n return self.decision_tree", "def fit(self):\n raise NotImplementedError", "def fit_dtree(X, y):\n dtree = DecisionTreeRegressor(max_depth=2)\n dtree.fit(X, y)\n return dtree", "def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self", "def fit(self, X, y):\n # TODO: Implement\n self.cols = X.columns\n self.nCols = len(self.cols)\n X = np.array(X)\n y = np.array(y)\n \n for i in range(X.shape[1]): \n uniq = np.unique(X[:, i])\n self.possible.append(list(uniq)) # add possible values\n self.valN.append(len(uniq)) # and how many\n index = np.argmax(self.valN)\n print(index)\n self.tree = self.buildTree(X, y)", "def decision_tree_classifier(features,target):\r\n clf = DecisionTreeClassifier()\r\n clf.fit(features, target)\r\n return clf", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n 
criterion=best_tree_parameters['criterion'],\n random_state=1)", "def fit_model(X, y):\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = { 'max_depth' : [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def decision_tree(self, min_impurity_splits = None, is_voice_data = True):\n title = \"Learning Curves (Decision Tree - voice dataset)\"\n if not is_voice_data:\n title = \"Learning Curves (Decision Tree - EEG dataset)\"\n estimators = []\n for min_impurity_split in min_impurity_splits:\n estimator = tree.DecisionTreeClassifier(criterion=\"entropy\", \\\n min_impurity_split = min_impurity_split)\n estimators.append(estimator)\n\n # set colors: r -red, g- green, b - blue, m - magenta\n colors = [(\"r\", \"g\"), (\"b\", \"m\")] \n labels = [(\"Training accuracy (unpruned tree)\", \n \"Cross-validation accuracy (unpruned tree)\"),\n (\"Training accuracy (pruned tree)\", \n \"Cross-validation accuracy (pruned tree)\")]\n \n # Cross validation with 100 iterations to get smoother mean test and train\n # score curves, each time with 30% data randomly selected as a validation set.\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n self.plot_learning_curve(estimators, title, labels, colors, self.X, self.y, \\\n cv=cv, n_jobs=4)\n \n # plot validation curve\n estimator_val = tree.DecisionTreeClassifier (criterion=\"entropy\") \n param_name = \"min_impurity_split\"\n x_label = \"Number of nodes in decision tree\"\n val_title = \"Validation Curve with Decision Tree (voice dataset)\"\n params =[i/100.0 for i in range(1,50)]\n if not is_voice_data:\n val_title = \"Validation Curve with Decision Tree (EEG dataset)\"\n params = np.logspace(-0.25, 0, 50)\n number_of_nodes = []\n for param in params:\n clf = tree.DecisionTreeClassifier(criterion=\"entropy\", min_impurity_split = param)\n clf.fit(self.X, self.y)\n number_of_nodes.append(clf.tree_.node_count)\n print number_of_nodes\n self.plot_validation_curve(estimator_val, params, param_name, self.X, \n self.y, val_title, xtricks = number_of_nodes, x_label = x_label,\n cv=cv, n_jobs = 4)\n plt.show()", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def fit(self, x):\n pass", 
"def fit(self, X):\n raise NotImplementedError", "def fit_model(X, y):\n\n # Create cross-validation sets from the training data\n # sklearn version 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)\n # sklearn versiin 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)\n cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth': np.arange(1, 11)}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'\n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def fit_model(X, y):\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth':list(range(1,10))}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = GridSearchCV(regressor, params, scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def train_clf(x_train, y_train, clf_model=\"decision_tree\"):\n clf = classifiers[clf_model]\n clf.fit(x_train, y_train)\n return clf", "def __init__(self, max_depth=None, criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... 
\")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def fit(self, data, labels):\n self.clf.fit(data, labels)", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):\n models = []\n bags_X = []\n bags_y = []\n for i in range(self.n_estimators):\n bag_X = []\n bag_y = []\n X_copy = X.values\n y_copy = y.values\n while len(bag_X) < len(X):\n index = randrange(len(X_copy))\n bag_X.append(X_copy[index])\n bag_y.append(y_copy[index])\n model = self.base_estimator\n model.fit(bag_X,bag_y)\n # print(tree.export_graphviz(model))\n models.append(copy.deepcopy(model))\n bags_X.append(bag_X)\n bags_y.append(bag_y)\n\n self.models = models\n self.X = X\n self.y = y\n self.bags_X = bags_X\n self.bags_y = bags_y\n return self", "def fit(self) -> None:\n\n levels = self.levels\n TSs = GetAggregateTS(self.data).aggregate(levels)\n models = {}\n residuals = {}\n fcsts = {}\n for bm in self.baseModels:\n model_name = bm.model_name\n if model_name is None: # only residuals and fcsts are provided\n models[bm.level] = None\n residuals[bm.level] = bm.residuals\n fcsts[bm.level] = bm.fcsts\n else:\n m = BASE_MODELS[model_name](\n data=TSs[bm.level],\n params=bm.model_params,\n )\n m.fit()\n models[bm.level] = m\n self.models = models\n self.info_fcsts = fcsts\n self.info_residuals = residuals", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def create_model(self):\n self.classifier = DecisionTreeClassifier(max_depth=1)", "def fit(self):\n raise NotImplementedError('')", "def fit_and_test(X, y) -> None:\r\n models = {\r\n \"tree2\": RandomForestClassifier(n_estimators=1, n_jobs=-1, class_weight=\"balanced\", random_state=0),\r\n \"tree1\": RandomForestClassifier(n_estimators=1, n_jobs=-1, random_state=0, criterion=\"entropy\"),\r\n \"random_forest_10\": RandomForestClassifier(\r\n n_estimators=10, n_jobs=-1, class_weight=\"balanced\", criterion=\"gini\"\r\n ),\r\n \"random_forest_100\": RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion=\"entropy\"),\r\n \"knn_1\": KNeighborsClassifier(n_neighbors=1, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_5\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1, metric=\"hamming\"),\r\n \"knn_15\": KNeighborsClassifier(n_neighbors=15, n_jobs=-1, metric=\"hamming\"),\r\n \"cnb\": ComplementNB(),\r\n }\r\n\r\n for model_name in models.keys():\r\n cross_validate(estimator=models[model_name], X=X, y=y, num_splits=5, save_name=model_name)", "def fit(self, X, y, sample_weight=None):\n self.forest.fit(X, y.ravel(), sample_weight)\n self.trainy = y.copy()\n ntrees = self.forest.n_estimators\n ntrain = y.shape[0]\n self.train_tree_node_ID = np.zeros([ntrain, ntrees])\n for i in range(ntrees):\n self.train_tree_node_ID[:, i] = self.forest.estimators_[i].apply(X)\n self.train_tree_node_ID = self.train_tree_node_ID.astype('h') # because it is only indexes, store as short int", "def fit_test(self):", "def fit(self, X,y):\n pass", "def tree_model(feature_train, help_rank_train, model_name):\n decision_tree = 
DecisionTreeClassifier()\n decision_tree = decision_tree.fit(feature_train, help_rank_train)\n tree_model = open(model_name,'wb')\n dump(decision_tree, tree_model, -1)\n return", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def fit(self, x):\n raise NotImplementedError()", "def fit(self, X, Y):\n ...", "def fit(self, col_names, rows):\n # Last el is alwasy label for this setup.\n for _ in range(self.n_samples):\n k_indices = np.random.choice(len(col_names) - 1, self.n_features, replace=False)\n tree = DecisionTree(feat_indices=k_indices)\n tree.fit(col_names, rows)\n self.forest.append(tree)", "def fit(self):\n raise NotImplementedError # pragma: no cover", "def fit(self, x, y, debug=False):\n\n # Set feature names\n self.set_names(x)\n\n # Convert to mats if not\n x = self.strip_df(x)\n y = self.strip_df(y)\n\n self.tree = Tree.build_nodes(x, y,\n max_depth=self.params['max_depth'],\n min_data=self.params['min_data'],\n dynamic_bias=self.params['dynamic_bias'],\n debug=debug, names=self.feature_names)\n\n return self", "def fit(self, X: np.ndarray, y: np.ndarray):\n # Create the root node\n self.root = TreeSplits()\n\n # Get all possible values for discrete valued columns\n # Necessary so each split can handle unique X values that\n # were not in the training set.\n self.discrete_value_maps = {\n col_idx: np.unique(X[:, col_idx])\n for col_idx, col_type in self.col_type_map.items()\n if col_type == \"discrete\"\n }\n\n # Start splitting on the root node.\n self.get_next_split(X=X, y=y, tree_split=self.root)", "def fit(self, X, y=None):\n \n self.models = [\n self._load_model(loc)\n for loc in self.model_locs]\n self.estimator = Pipeline(steps=[\n (\"imputer\", 
SimpleImputer()),\n (\"clf\", RandomForestClassifier(\n n_estimators=self.n_estimators, \n min_samples_split=self.min_samples_split,\n max_features=self.max_features))\n ])\n \n probs = self._transform_models(X)\n self.estimator.fit(probs, y)\n self.classes_ = self.estimator.classes_\n return self", "def fit(self, X: np.ndarray, Y: np.ndarray):\n\n self.ind = 100\n self.X = X\n self.Y = Y\n\n self.M = int(np.max([int(X.shape[0]) / self.points_per_experts, 1]))\n self.partition_type = partition_type\n \n \n self.N = int(X.shape[0] / self.M)\n\n \n self.partition = np.random.choice(X.shape[0],size=(self.M, self.N),replace=False)\n \n \n lengthscales = tf.convert_to_tensor([1.0] * self.X.shape[1], dtype=default_float())\n self.kern = gpflow.kernels.RBF(lengthscales=lengthscales)\n\n\n self.invlink = gpflow.likelihoods.RobustMax(self.C) \n self.likelihood = gpflow.likelihoods.MultiClass(self.C,invlink=self.invlink)\n\n\n ivs = []\n for i in range(self.M):\n init_method = ConditionalVariance()\n Z = init_method.compute_initialisation(np.array(X[self.partition[i]].copy()), self.ind, self.kern)[0]\n ivs.append(tf.convert_to_tensor(Z))\n \n self.experts = []\n \n for i in range(self.M):\n expert = gpflow.models.SVGP(kernel = self.kern, likelihood = self.likelihood, num_latent_gps = self.C, inducing_variable = ivs[i])\n self.experts.append( expert )\n \n for expert in self.experts:\n gpflow.set_trainable(expert.inducing_variable, True)\n\n self.opt = tf.keras.optimizers.Adam(learning_rate=0.05)\n\n self.optimize()", "def predict(self, X):\n if self._tree is None:\n raise RuntimeError(\"Estimator not fitted, call `fit` first\")\n\n def _classify(tree, x):\n \"\"\"Classify a single sample with the fitted decision tree.\n\n Args:\n x: ((pd.Dataframe) a single sample features, of shape (D,).\n\n Returns:\n (int): predicted testing sample label.\n \"\"\"\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer\n\n # YOUR CODE HERE\n # begin answer\n if len(X.shape)==1:\n return _classify(self._tree,X)\n else:\n results=[]\n for i in range(X.shape[0]):\n results.append(_classify(self._tree, X.iloc[i, :]))\n return np.array(results)\n # end answer", "def decision_tree(df):\n features = df[['Temperature(F)', 'Humidity(%)', 'Visibility(mi)', 'Wind_Speed(mph)',\n 'Precipitation(in)', 'Amenity', 'Bump', 'Crossing', 'Give_Way',\n 'Junction', 'No_Exit', 'Railway', 'Roundabout', 'Station', 'Stop',\n 'Traffic_Calming', 'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend',\n 'Side_R', 'Season_Spring', 'Season_Summer',\n 'Season_Winter', 'Weather_Condition_Clear', 'Weather_Condition_Fog',\n 'Weather_Condition_Other', 'Weather_Condition_Rain',\n 'Weather_Condition_Snow', 'Weather_Condition_Thunderstorm']]\n X= features\n y = df['Severity']\n clf = DecisionTreeClassifier(min_samples_split=6, min_samples_leaf=2, max_depth=3, \n criterion = 'gini', random_state=42)\n clf.fit(X, y)\n\n plt.figure(figsize=(25,10))\n a = plot_tree(clf, \n feature_names=X.columns.to_list(), \n filled=True, \n rounded=True, \n fontsize=14)\n plt.savefig(\"../Images/rockies_decision_tree.png\")\n plt.show()", "def fit_model(X, y,metric, model):\n cv_sets = ShuffleSplit(n_splits=10, test_size= 0.2, 
train_size= 0.8, random_state=42)\n \n\n if model == 'regression_tree':\n\n clf = DecisionTreeRegressor(random_state=42)\n\n # Creating a dictionary for the parameter 'max_depth' with a range from 1 to 10\n param = {\n 'max_depth': [1,2,3,4,5,6,7,8,9,10]\n }\n\n\n elif model == 'ridge':\n clf = Ridge(random_state=42, fit_intercept=False)\n param = {\n 'alpha': [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]\n }\n\n\n if metric == 'r2':\n scoring_fnc = make_scorer(r_squared,greater_is_better=True)\n\n elif metric == 'rss':\n scoring_fnc = make_scorer(rss, greater_is_better=False)\n\n # Creating the grid search cv object --> GridSearchCV()\n grid = GridSearchCV(estimator=clf, param_grid=param, cv=cv_sets,scoring= scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def fit(self, X, y):\n\n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n\n # RankedSVM requires a very specific format of y\n # Each row should represent a label, consisiting of ones and minus ones\n y = np.transpose(y).astype(np.int16)\n y[y == 0] = -1\n self.X_ = X\n self.y_ = y\n self.num_class = y.shape[0]\n\n Weights, Bias, SVs =\\\n RankSVM_train(train_data=X,\n train_target=y,\n cost=self.cost,\n lambda_tol=self.lambda_tol,\n norm_tol=self.norm_tol,\n max_iter=self.max_iter,\n svm=self.svm, gamma=self.gamma,\n coefficient=self.coefficient,\n degree=self.degree)\n\n self.Weights = Weights\n self.Bias = Bias\n self.SVs = SVs\n\n return self", "def fit(self, x, y):\n\n self.ohe = OneHotEncoder(sparse=False)\n if len(y.shape)>1:\n self.ohe.fit(y)\n else:\n self.ohe.fit(y.reshape(-1, 1))\n\n print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma_classes(self.training_hit_probability, y)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n if self.verbose:\n print('Average hm score', str(np.mean(hm_y)))\n\n print('Stage 2')\n # Stage 2\n hm_1hot = self._one_hot(self.training_hit_probability, y)\n\n # Train stage2\n self.confidencemodel.fit(x_, hm_1hot)", "def predict_decision_tree(input_data=rand_input, tree=dtr_full):\n return y_scaler.inverse_transform(tree.predict(input_data))", "def fit(self):\n self.eval_chisq([1, 1, 1, 1])", "def fit_transform(self, X, y, sample_weight=None):\n # Instantiate rule ensemble generator and set parameters\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators, silent=(self.verbose>0),\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n else:\n raise NotImplementedError\n \n # Name features\n if 
isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = ['f'+str(i) for i in range(X.shape[1])]\n \n # Check input\n X = check_array(X)\n \n # Generate and extract rules\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError() # TODO: work out how to incrementally train XGB\n \n if self.verbose > 0:\n print('fitting trees')\n \n # For each tree: get leaf numbers and map them to [0, num leaves]\n # before one-hot encoding them\n n_values = \"auto\"\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n \n if self.verbose > 0:\n print('setup encoding')\n \n # Scale and centre linear features\n X = self.ext_scaler.fit_transform(X)\n \n if self.linear_features:\n # Linear features must be scaled to have same weighting as an average rule\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))\n \n if self.verbose > 0:\n print('encoded')\n \n # Fit sparse linear model to rules (and optionally linear features)\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty, class_weight=self.class_weight,\n warm_start=self.warm_start, solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n \n if self.verbose > 0:\n print('fitted')\n \n # Mask features with zero co-efficients\n # self.feature_mask_ = np.arange(self.LR.coef_.size)\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n \n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform", "def fit(self, data, target):\n tree = TreeGrower(self.min_samples_split,\n self.criterion)\n self.tree = tree.grow(data, target, data.columns.tolist())\n return self", "def _tree_predict(self, estimator_id, X):\n return self.estimators_[estimator_id].predict(X) * self.learning_rate", "def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")", "def fit(self, dataset, verbose=False):\n self.inputs = dataset.shape[1]-1\n self.bits = np.ceil(\n np.log2(\n np.abs(\n np.amax(dataset, axis=0) -\n np.amin(dataset, axis=0)))).astype(np.int32)\n self.is_neg = (np.amin(dataset, axis=0) < 0).astype(np.int8)\n\n self.trees = []\n\n for i in range(self.n_trees):\n if verbose:\n print(\"... creating tree {}\".format(i))\n\n # as subsample is an expensive operation, we will only perform it if it\n # reduces the dataset substantially\n\n if self.sample_size and self.sample_size < 0.3 * dataset.shape[0]:\n if verbose:\n print(\"... 
generated subsample of size {}\".format(self.sample_size))\n sample = self.subsample(dataset)\n else:\n sample = dataset\n\n self.trees.append(fit_parallel(\n self.max_depth, self.min_size, sample, True))", "def fit(self, x: np.array, t: np.array, y: np.array) -> None:\n\n self.forest.fit(x, y)", "def fit(self, X, y):\n\n X, y = check_X_y(X, y, dtype=DTYPE)\n y = self._validate_y_class(y)\n \n # self.n_outputs_ = y.shape[1]\n n_samples, self.n_features_ = X.shape\n\n if self.sample_weight is None:\n self.sample_weight = np.ones((n_samples,))\n\n self._validate_estimator()\n\n random_state = check_random_state(self.random_state)\n\n if not self.warm_start:\n self.init_stage()\n\n n_more_estimators = self.n_estimators - len(self.estimators_)\n\n if n_more_estimators < 0:\n raise ValueError('n_estimators=%d must be larger or equal to '\n 'len(estimators_)=%d when warm_start==True'\n % (self.n_estimators, len(self.estimators_)))\n\n elif n_more_estimators == 0:\n warn(\"Warm-start fitting without increasing n_estimators does not \"\n \"fit new trees.\")\n\n else:\n if self.warm_start and len(self.estimators_) > 0:\n # We draw from the random state to get the random state we\n # would have got if we hadn't used a warm_start.\n random_state.randint(MAX_INT, size=len(self.estimators_))\n\n trees = []\n for i in xrange(n_more_estimators):\n tree = self._make_estimator(append=False,\n random_state=random_state)\n trees.append(tree)\n\n for i in xrange(self.n_estimators):\n tree = self._build_base_estimators(trees[i], X, y)\n if tree is None:\n warn(\"cannot fit %d estimators. %d esitmators are fitted.\" % (self.n_estimators, i+1))\n break\n trees[i] = tree\n self.estimators_.extend(trees[:i+1])\n\n return self._post_process(X, y)", "def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))", "def cross_validation(self, x, t, k=5):\n print(\"Cross validation of the Decision Tree Classifier...\")\n bestCriteria = ''\n bestMax_depth= 2\n bestError = float('inf')\n\n N = len(x)\n N_train = math.floor(0.8 * N)\n\n dicCriteria = ['gini', 'entropy']\n min_depth = 2\n max_depth = 40\n\n for crit in dicCriteria:\n for d in range(min_depth, max_depth):\n errors = np.zeros(k)\n\n for j in range(k):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_X, random_t = zip(*map_index)\n\n train_x = random_X[:N_train]\n valid_x = random_X[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = tree.DecisionTreeClassifier(max_depth=d, criterion=crit)\n self.train(train_x, train_t)\n error_valid = np.array([self.error(x_n, t_n)\n for t_n, x_n in zip(valid_t, valid_x)])\n errors[j] = error_valid.mean()\n\n mean_error = np.mean(errors)\n if mean_error < bestError:\n bestError = mean_error\n bestCriteria = crit\n bestMax_depth = d\n print(\"The new best hyper parameters are : \", bestMax_depth, bestCriteria)\n\n print(\"Best hyper parameters are : \", bestMax_depth, bestCriteria)\n print(\"Validation error : \", 100 * bestError, \"%\")\n self.model = tree.DecisionTreeClassifier(max_depth=bestMax_depth, criterion=bestCriteria)\n self.train(x, t)", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def show(self):\n if self._tree is None:\n raise RuntimeError(\"Estimator not fitted, call `fit` first\")\n\n import tree_plotter\n tree_plotter.createPlot(self._tree)", "def fit(self, X):\n raise NotImplementedError('Abstract method \"fit\" must be '\n 'specialised!')", "def create_tree(f_train, f_test, l_train, 
l_test):\n # initialize model\n model = DecisionTreeClassifier(max_depth=2)\n\n # train it on training data\n model.fit(f_train, l_train)\n\n # gather the model's predictions for train\n train_predictions = model.predict(f_train)\n\n # gather the model's predictions for test\n test_predictions = model.predict(f_test)\n\n # calculate accuaracy of train\n print('Tree Train Accuracy: ', accuracy_score(l_train, train_predictions))\n\n # calculate accuracy of test\n print('Tree Test Accuracy: ', accuracy_score(l_test, test_predictions))\n\n return model", "def fit(self, X, y) :\n \n ### ========== TODO : START ========== ###\n # part b: set self.probabilities_ according to the training set\n training_set = y.sum()/y.size\n self.probabilities_ = training_set\n return self\n ### ========== TODO : END ========== ###\n \n return self", "def fit(self):\n accuracy = 0\n no_improvement = 0\n epochs = trange(self.args.epochs, desc=\"Accuracy\")\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n self.model.train()\n for epoch in epochs:\n self.optimizer.zero_grad()\n prediction = self.model(self.propagation_matrix, self.features)\n loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])\n loss = loss + self.args.lambd*torch.sum(self.model.page_rank_convolution_1.weight_matrix**2)\n loss.backward()\n self.optimizer.step()\n new_accuracy = self.score(self.validation_nodes)\n epochs.set_description(\"Validation Accuracy: %g\" % round(new_accuracy,4))\n if new_accuracy < accuracy:\n no_improvement = no_improvement + 1\n if no_improvement == self.args.early_stopping:\n epochs.close()\n break\n else:\n no_improvement = 0\n accuracy = new_accuracy \n acc = self.score(self.test_nodes)\n print(\"\\nTest accuracy: \" + str(round(acc,4)) )", "def fit(self, X, y=None, feature_names=None):\n if type(X) == pd.DataFrame:\n X = X.values\n if type(y) in [pd.DataFrame, pd.Series]:\n y = y.values\n\n self.n_obs = X.shape[0]\n self.n_features_ = X.shape[1]\n self.feature_names_, self.feature_dict_ = enum_features(X, feature_names)\n\n self.tree_generator = self._get_tree_ensemble(classify=False)\n self._fit_tree_ensemble(X, y)\n\n extracted_rules = self._extract_rules()\n self.rules_without_feature_names_, self.coef, self.intercept = self._score_rules(X, y, extracted_rules)\n\n return self", "def buildDecisionTree(self, data):\n self.data = data\n self.decisionTree = self.buildTree(self.data, self.listAttributes)\n with open(\"decision_tree_model\", \"wb\") as f:\n pickle.dump(self.decisionTree, f, pickle.HIGHEST_PROTOCOL)\n return self.decisionTree", "def _fit(self):\n\n\t\tclf = LogisticRegression()\n\t\tclf.fit(inputs, labels)\n\n\t\treturn clf", "def fit(self):\n sys.stdout.write(\n \"Fitting a predictor for X:{}, y:{}, method:{}... 
please wait.\\n\"\n .format(self.dataset.data_name,\n self.dataset.trait_name,\n self.predictor_name))\n\n self.predictor.fit(self.dataset.X, self.dataset.y)\n self.has_been_fit = True\n sys.stdout.write(\"\\tFinished.\\n\")\n # Collect scores from predictor, rename innate scores variable to\n # self.scores_\n scores = self.predictor.predictor_scoring_fun(self.predictor)\n self.scores_ = pd.Series(index=self.X.columns, data=scores)\n self.has_been_scored = True", "def fit(self, data):\n for v in self.features + self.targets:\n v._fit(data)", "def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()", "def _loop_fit_tree(tree, guess_mat, fit_results, vdc_shifted, shift_ind):\n # print('Now fitting cluster #{}'.format(tree.name))\n # I already have a guess. Now fit myself\n curr_fit_results = fit_loop(vdc_shifted, np.roll(tree.value, shift_ind), guess_mat[tree.name])\n # keep all the fit results\n fit_results[tree.name] = curr_fit_results\n for child in tree.children:\n # Use my fit as a guess for the lower layers:\n guess_mat[child.name] = curr_fit_results[0].x\n # Fit this child:\n guess_mat, fit_mat = _loop_fit_tree(child, guess_mat, fit_results, vdc_shifted, shift_ind)\n return guess_mat, fit_results", "def fit(self, X, y, **fit_params):\n ...", "def train_model(regressor=DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DATASET_FILE_NAME,\n model_file_name=MODEL_FILE_NAME):\n df = pd.read_csv(dataset_file_name)\n\n # A minumum amount of feature engineering: The player's and opponent's\n # exact score may not be that important for our decisions. The difference,\n # however, certainly is. Moreover, the card value itself is not that\n # important. Here, the sum is.\n df['score_difference'] = df.self_score - df.opp_score\n df.drop(columns=['opp_score'], inplace=True)\n df['score_if_card_played'] = df.self_score + df.result_card_val\n df.drop(columns=['result_card_val'], inplace=True)\n\n # Strategy will be to let our model predict the score for different actions\n # Hence, we're going to train the model on that now\n X, y = df.drop(columns='score'), df.score\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n regressor.fit(X_train, y_train)\n\n feature_names = ['self_score', 'opp_stands', 'result_stand',\n 'score_difference', 'score_if_card_played']\n\n score = regressor.score(X_test, y_test)\n print(f\"Score on the test set: {score}.\")\n if isinstance(regressor, DecisionTreeRegressor):\n export_graphviz(regressor, feature_names=feature_names,\n out_file=GRAPHVIZ_FILE_NAME, filled=True)\n\n # For persistence, we export the generated model\n dump(regressor, model_file_name)\n return score", "def fit(self, X, y, dview = None):\n\t\t#Get classes\n\t\tclasses, y[:] = numpy.unique(y[:], return_inverse=True)\n\t\tself.classes_ = classes\n\t\tself.n_classes_ = classes.shape[0]\n\t\tforests = []\n\n\t\tfeatureFunction = self.featureFunction\n\t\tfor i in range(self.n_forests):\n\t\t\tprint(\"forest : \",i+1,\" / \",self.n_forests)\n\t\t\tif (i != 0):\n\t\t\t\tif(self.specialisation == 'global'):\n\t\t\t\t\tacc = forest.getFeatureImportance()\n\t\t\t\t\tfeatureFunction.random_weight = acc\n\t\t\t\telif(self.specialisation =='per_class'):\n\t\t\t\t\tacc_per_class = forest.getFeatureImportanceByClass()\n\t\t\t\t\tfeatureFunction.random_weight_per_class = acc_per_class\n\n\t\t\tforest = deepcopy(self.forest)\n\t\t\tforest.featureFunction = featureFunction\n\t\t\tforest.fit(X, y, 
dview)\n\t\t\tforests.append(forest)\n\n\t\t# Collect newly grown Forests\n\t\tself.forests_.extend(forests)" ]
[ "0.7569848", "0.74153537", "0.72756153", "0.72105074", "0.6821379", "0.67818767", "0.662454", "0.6596738", "0.65849745", "0.65849745", "0.65598154", "0.65543187", "0.6544427", "0.6542975", "0.6537915", "0.65235364", "0.650521", "0.65000254", "0.6415684", "0.64080507", "0.63334537", "0.6306495", "0.6283732", "0.6282245", "0.62488604", "0.62404025", "0.6231845", "0.62313604", "0.6229145", "0.62077147", "0.6204043", "0.62022096", "0.619403", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.61760926", "0.6130158", "0.6126287", "0.611689", "0.60993433", "0.60993433", "0.60993433", "0.6097708", "0.6095814", "0.6087732", "0.60842496", "0.6078816", "0.6069515", "0.6062473", "0.6056125", "0.6054347", "0.60414255", "0.6036728", "0.6017604", "0.6009785", "0.6009352", "0.600902", "0.59882045", "0.59772706", "0.59708786", "0.59656185", "0.59582883", "0.59500134", "0.59499127", "0.59486026", "0.59458745", "0.59225845", "0.59195673", "0.5918496", "0.59097975", "0.59091127", "0.59037066", "0.58979994", "0.5890841", "0.5888037", "0.5885533", "0.5872779", "0.58688945", "0.58685035", "0.58580637", "0.5835262", "0.58343667", "0.5834196", "0.5833743", "0.5827492", "0.58223605", "0.5821499", "0.5819575", "0.5812104", "0.5811015", "0.5805661", "0.5802275", "0.580092" ]
0.7340412
2
Return the accuracy attained by the knn on the test data set.
def score_one(self, test_data):
    test_in, test_labels = self._split_inputs_outputs(test_data)
    correct = 0
    total = 0

    for i, test_input in enumerate(test_in):
        prediction = self.model.predict(test_input.reshape(1,-1))
        if prediction[0] == test_labels[i]:
            correct+=1
        total+=1

    return float(correct)/total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def findOveralAccuracy(trainData,testData):\r\n kNNClassifier = kNN(trainData)\r\n \r\n All_Predictions = kNNClassifier.classify(testData,k=5)\r\n \r\n reference_dictionary = testData.dataDict['Species']\r\n\r\n Overall_Accuracy = 100*sum(reference_dictionary== All_Predictions)/len(All_Predictions)\r\n \r\n return All_Predictions, Overall_Accuracy", "def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\n\t\treturn self.accuracy_", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = 
[np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def get_test_accuracy(model, X_test, y_test):\n # Make predictions - test accuracy\n test_pred = model.predict(X_test)\n score = accuracy_score(test_pred, y_test)\n print(\"Test Accuracy:\", score)\n\n return test_pred", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N", "def accuracy(self):\r\n return self._feature.attributes[self._schema.accuracy]", "def get_accuracy(self, k=None):\n k = 1 if k is None else k\n n_correct = 0\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n is_correct = False\n for candidate in answer[:k]:\n if candidate in correct_set:\n is_correct = True\n break\n n_correct += int(is_correct)\n \n return n_correct / len(self.test_queries)", "def accuracy(pred, target):\n N = pred.shape[0]\n return (pred == target).sum() * 1.0 / N", "def accuracy(self):", "def getaccuracy(features: ndarray, target: ndarray, trained_model) -> float:\n predictions = trained_model.predict(features)\n\n accuracy = accuracy_score(target, predictions, normalize=True)\n\n return accuracy", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. 
/ batch_size for k in topk]", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def accuracy(predictions, targets):\n return accuracy", "def accuracy(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return (conf_matrix[0][0]+conf_matrix[1][1])/(conf_matrix[0][0] + conf_matrix[0][1]\\\n + conf_matrix[1][0] + conf_matrix[1][1])", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def accuracy(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n return (y_true == y_pred).mean()", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def accuracy(predicted, ground_truth):\n predicted_labels_decoded = np.argmax(predicted, axis=1)\n ground_truth_labels_decoded = np.argmax(ground_truth, axis=1)\n correct_rate = [1 if pred == truth else 0 for (pred, truth) in\n zip(predicted_labels_decoded, ground_truth_labels_decoded)]\n accuracy = sum(correct_rate) / ground_truth_labels_decoded.size\n return accuracy * 100", "def accuracy(self, X_train, y_train):\n y_train_pred = self.predict(X_train)\n diffs = y_train_pred - y_train\n count = 0.\n for i in range(y_train.shape[0]):\n if diffs[i] != 0:\n count+=1\n return 100 - count*100/y_train.shape[0]", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def accuracy(gt, pred):\n \n return np.mean(gt == pred)", "def accuracy(output, target): # , topk=(1,)):\n correct = 0\n batch_size = target.size(0)\n for i in range(batch_size):\n tar = target[i].data.cpu().numpy()\n pred = output[i].data.cpu().numpy()\n if (tar) == np.argmax(pred):\n correct += 1\n return float(correct/batch_size)", "def accuracy(output, target, topk=(1,)):\n\tmaxk = max(topk)\n\tbatch_size = target.size(0)\n\n\t_, pred = output.topk(maxk, 1, True, True)\n\tpred = pred.t()\n\tcorrect = pred.eq(target.view(1, -1).expand_as(pred))\n\n\tres = []\n\tfor k in topk:\n\t\tcorrect_k = correct[:k].view(-1).float().sum(0)\n\t\tres.append(correct_k.mul_(100.0 / batch_size))\n\treturn res", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size 
= target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n\r\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res", "def accuracy(y_pred, y_actual, topk=(1, )):\n maxk = max(topk)\n batch_size = y_actual.size(0)\n\n _, pred = y_pred.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(y_actual.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n pred = pred.type_as(target)\n target = target.type_as(pred)\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].contiguous().view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, 
True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return 
res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(actual, predicted):\n return np.sum(predicted == actual) / actual.shape[0]", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n 
_, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n 
res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n\n return res", "def accuracy_score(y_true, y_pred):\n\ttp, fn, fp, tn = confusion_matrix(y_true, y_pred, table_show=False)\n\n\treturn (tp+tn) / (tp+tn+fn+fp)", "def get_accuracy(pred, test_label, regression= \"logistic\"):\n if regression == \"multiclass\":\n pred_max = np.argmax(pred, axis=1)\n gt_max = np.argmax(test_label, axis=1)\n acc = np.sum(pred_max == gt_max)*100.0/pred.shape[0]\n elif regression == \"logistic\" or regression == \"probit\":\n if pred.ndim == 2:\n pred = pred[:,0]\n pred[pred >= 0.5] = 1.0\n pred[pred < 0.5] = 0.0\n acc = np.sum(pred == test_label)*100.0/pred.shape[0]\n\n return acc", "def get_accuracy(self, predicted_y, actual_y, log_tests=False):\n if log_tests:\n for i in range(actual_y.shape[0]):\n print 'predicted = {0}, actual = {1}'.format(predicted_y[i], actual_y[i])\n return float(sum(predicted_y == actual_y)) / predicted_y.shape[0]", "def test_accuracy(y, tx, w):\n labels = predict_regression_labels(w, tx)\n \n return (labels==y).sum()/len(y)", "def accuracy(output, target, topk=(1, 5)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, 
keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(1. / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True) # pred是top k的索引值\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # target每个样本只有一个值,表示具体类别值,expand之后比较是否相等,相等的就是对的\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) # top几的分类正确数量累加,然后除以batch_size就是准确率\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res, pred", "def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc" ]
[ "0.8180346", "0.80784214", "0.8005831", "0.80041337", "0.7895488", "0.77844036", "0.77788", "0.7739961", "0.77059036", "0.7696882", "0.7691345", "0.76276654", "0.76155436", "0.76090294", "0.7584759", "0.75794256", "0.74891686", "0.748881", "0.7452785", "0.7422463", "0.73850185", "0.73757035", "0.7368285", "0.73650914", "0.7361095", "0.7352222", "0.73162836", "0.7304859", "0.72815615", "0.72725505", "0.7259315", "0.72505397", "0.7230964", "0.72236943", "0.72166055", "0.7205636", "0.7202121", "0.7202121", "0.71935874", "0.71925175", "0.71861017", "0.7183251", "0.7142545", "0.7136622", "0.71300006", "0.71266204", "0.71266204", "0.71194196", "0.71191245", "0.7118854", "0.71186805", "0.71052647", "0.71040535", "0.70939624", "0.709354", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.7091858", "0.709148", "0.70871925", "0.7086699", "0.7086699", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70841944", "0.70832515", "0.7075606", "0.70750785", "0.7074523", "0.70728934", "0.7069295", "0.7067092", "0.7066967", "0.7064197", "0.70573467", "0.7054572", "0.70540875", "0.7052143" ]
0.0
-1
Use 10fold CV to produce a score
def score(self):
    splits = 10
    score = 0
    kf = KFold(n_splits=splits, shuffle=True)
    kf.get_n_splits(self.data)

    for train_ind, test_ind in kf.split(self.data):
        train = [self.data[ind] for ind in train_ind]
        test = [self.data[ind] for ind in test_ind]
        self.model = self._fit(train)
        temp_score = self.score_one(test)
        score += temp_score

    return score/float(splits)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cv_score(clf, x, y, score_func):\r\n result = 0\r\n nfold = 5\r\n for train, test in KFold(y.size, nfold): # split data into train/test groups, 5 times\r\n clf.fit(x[train], y[train]) # fit\r\n result += score_func(clf, x[test], y[test]) # evaluate score function on held-out data\r\n return result / nfold # average\r", "def random_search_cv(self, X_test, n_cv=5, n_folds_cv=5, evaluation_metric='top30'):\n # DON'T KNOW IF WE WILL IMPLEMENT IT\n # We may implement a method on a per-classifier bases\n # depending on if the classifier is based on a Scikit-learn classifier\n # or not\n pass", "def svm_cv(self, nsplits: int = 5) -> (float, float, float):\r\n c_cand = [0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 100]\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for c in c_cand:\r\n acc_result_c = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = SVC(C=c, gamma='auto').fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result_c.append(binary_acc(y_test, y_predict))\r\n acc_result.append(np.mean(acc_result_c))\r\n best_c = c_cand[acc_result.index(max(acc_result))]\r\n return max(acc_result), np.std(acc_result), best_c", "def algo_CVmetrics(classifier_object, X_train, Y_train):\r\n \r\n cv = RepeatedStratifiedKFold(n_splits = 5, n_repeats = 3, random_state = seed_custom)\r\n \r\n metricslist = {'f2': make_scorer(metrics.fbeta_score, beta = 2), \r\n 'balacc': make_scorer(metrics.balanced_accuracy_score),\r\n 'precision': make_scorer(metrics.precision_score),\r\n 'recall': make_scorer(metrics.recall_score)}\r\n \r\n cv_results = cross_validate(classifier_object, X_train, Y_train, cv = cv, scoring = metricslist, return_estimator = True)\r\n \r\n f2_mean = np.mean(cv_results['test_f2'])\r\n f2_std = np.std(cv_results['test_f2'])\r\n \r\n balacc_mean = np.mean(cv_results['test_balacc'])\r\n balacc_std = np.std(cv_results['test_balacc'])\r\n\r\n precision_mean = np.mean(cv_results['test_precision'])\r\n precision_std = np.std(cv_results['test_precision'])\r\n \r\n recall_mean = np.mean(cv_results['test_recall'])\r\n recall_std = np.std(cv_results['test_recall'])\r\n \r\n scorebox = pd.DataFrame(np.zeros((1,8)), columns = list(['F2-Score Mean', 'F2-Score STD', 'Balanced Accuracy Mean', 'Balanced Accuracy STD',\r\n 'Precision Mean', 'Precision STD', 'Recall Mean', 'Recall STD']))\r\n \r\n scorebox.iloc[0,0] = f2_mean\r\n scorebox.iloc[0,1] = f2_std\r\n scorebox.iloc[0,2] = balacc_mean\r\n scorebox.iloc[0,3] = balacc_std\r\n scorebox.iloc[0,4] = precision_mean\r\n scorebox.iloc[0,5] = precision_std\r\n scorebox.iloc[0,6] = recall_mean\r\n scorebox.iloc[0,7] = recall_std \r\n \r\n scorebox = np.round(scorebox, 3)\r\n \r\n print(\"Model has a mean CV balanced accuracy of {0}, (Std: {1})\".format(round(balacc_mean,3), round(balacc_std,3)))\r\n print(\"Model has a mean CV F2_Score of {0}, (Std: {1})\".format(round(f2_mean,3), round(f2_std,3)))\r\n print(\"Model has a mean CV Precision of {0}, (Std: {1})\".format(round(precision_mean,3), round(precision_std,3)))\r\n print(\"Model has a mean CV Recall of {0}, (Std: {1})\".format(round(recall_mean,3), round(recall_std,3)))\r\n \r\n return scorebox", "def cv(data, folds, model):\n def rmsle(predicted, actual):\n # Root Mean Squared Logarithmic Error\n return mean_squared_error(\n np.log(predicted+1),\n np.log(actual+1)\n ) ** 0.5\n\n errors = []\n print \" Cross Validation in progress...\"\n kf = 
cross_validation.KFold(n=len(data.index), n_folds=folds)\n for i, (train_index, validation_index) in enumerate(kf):\n print ' F%d.' % i\n train = data.iloc[train_index]\n validation = data.iloc[validation_index]\n\n model.fit(train)\n prediction = model.predict(validation)\n actual = data.iloc[validation_index]['count'].as_matrix()\n error = rmsle(prediction, actual)\n errors.append(error)\n return np.mean(errors)", "def cross_validation(features, target, n_neighbors=5, n_folds=5):\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n cv_scores = cross_val_score(clf, features, target, cv=n_folds)\n # print each cv score (accuracy) and average them\n print('Cross Validation Scores Mean: %.2f' % (np.mean(cv_scores) * 100))", "def fitting_scoring(features, cv=5, verbose=False, is_RFE_mode=False, n_dims_RFE=1):\n # N-fold cross-validation\n num_fold = cv\n accuracy = [0] * num_fold\n for i in range(num_fold):\n training_set = list()\n training_labels = list()\n testing_set = list()\n testing_labels = list()\n for family in features:\n feature_mat = features.get(family)\n if verbose: print(family, \"sample size:\", len(feature_mat))\n\n fold_start = i * int(len(feature_mat) / num_fold)\n fold_end = fold_start + int(len(feature_mat) / num_fold) - 1\n\n # separate training and testing set\n for j in range(len(feature_mat)):\n if fold_start <= j <= fold_end:\n testing_set.append(feature_mat[j])\n testing_labels.append(family)\n else:\n training_set.append(feature_mat[j])\n training_labels.append(family)\n\n p_res = None\n X_new = None\n X_mask = None\n if is_RFE_mode:\n clf = svm.SVC(kernel='linear')\n clf_reduced = RFE(clf, n_dims_RFE, step=1)\n clf_reduced = clf_reduced.fit(training_set, training_labels)\n X_new = clf_reduced.transform(training_set)\n X_mask = clf_reduced.get_support()\n p_res = clf_reduced.predict(testing_set)\n else:\n clf = svm.SVC()\n clf.fit(training_set, training_labels)\n p_res = clf.predict(testing_set)\n\n accuracy[i] = 0\n for j in range(len(p_res)):\n if p_res[j] == testing_labels[j]:\n accuracy[i] += 1\n accuracy[i] = (accuracy[i] / len(p_res)) * 100\n\n if is_RFE_mode:\n if verbose: print('n_dims:', n_dims_RFE, accuracy)\n return np.mean(accuracy), X_new, X_mask\n\n return np.mean(accuracy)", "def tenfold_cross_validation(X, y):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n for train_index, test_index in KFold(10).split(X):\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # change the parameters to see how each parameter affects the l1inear classifier\n linear_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\n # start training the classifier\n linear_classifier.fit(x_train, y_train)\n\n # create and plot the confusion matrix\n # cross validation done with cross_val_\n y_train_pred = cross_val_predict(linear_classifier, x_test, y_test, cv=10)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_test, y_train_pred)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_cross_validation_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def quick_score(clf, X, y, cv=5, n_jobs=20):\n\n return jjcross_val_score(clf, X, y, mean_absolute_error, cv, n_jobs=n_jobs).mean()", "def testaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n w,b=svmfit(i,C)\r\n 
y_predict=predict(test_x,w,b)\r\n k=np.sum(np.absolute(y_predict-test_y))/2\r\n n=len(test_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def get_score(data, labels, fold_pairs, name, model, param, numTopVars,\r\n rank_per_fold=None, parallel=True, rand_iter=-1):\r\n assert isinstance(name, str)\r\n logging.info(\"Classifying %s\" % name)\r\n ksplit = len(fold_pairs)\r\n# if name not in NAMES:\r\n# raise ValueError(\"Classifier %s not supported. \"\r\n# \"Did you enter it properly?\" % name)\r\n\r\n # Redefine the parameters to be used for RBF SVM (dependent on\r\n # training data)\r\n if \"SGD\" in name:\r\n param[\"n_iter\"] = [25] # [np.ceil(10**3 / len(fold_pairs[0][0]))]\r\n classifier = get_classifier(name, model, param, rand_iter=rand_iter)\r\n \r\n if name == \"RBF SVM\": #This doesn't use labels, but looks as ALL data\r\n logging.info(\"RBF SVM requires some preprocessing.\"\r\n \"This may take a while\")\r\n #\r\n is_data_computed_gamma = True\r\n #\r\n if not is_data_computed_gamma:\r\n # Sahil commented the code below that computes the gamma choices from data.\r\n # The computed gamma choices seem too low thereby making SVM very slow. Instead, trying out fixed values.\r\n print param\r\n gamma = param['gamma']\r\n gamma = np.array(gamma)\r\n print 'gamma', gamma\r\n else:\r\n #Euclidean distances between samples\r\n # sahil switched from the first call to second one for computing the dist as the first one is giving error.\r\n # dist = pdist(StandardScaler().fit(data), \"euclidean\").ravel()\r\n dist = pdist(RobustScaler().fit_transform(data), \"euclidean\").ravel()\r\n print 'dist', dist\r\n #Estimates for sigma (10th, 50th and 90th percentile)\r\n sigest = np.asarray(np.percentile(dist, [10, 50, 90]))\r\n print 'sigest', sigest\r\n #Estimates for gamma (= -1/(2*sigma^2))\r\n gamma = 1./(2*sigest**2)\r\n print 'gamma', gamma\r\n #\r\n #\r\n #Set SVM parameters with these values\r\n # sahil changed the code a bit to remove a bug\r\n # param = [{\"kernel\": [\"rbf\"],\r\n # \"gamma\": gamma.tolist(),\r\n # \"C\": np.logspace(-2,2,5).tolist()}]\r\n param = {\"kernel\": [\"rbf\"],\r\n \"gamma\": gamma.tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()}\r\n # if name not in [\"Decision Tree\", \"Naive Bayes\"]:\r\n if param:\r\n if hasattr(classifier,'param_grid'): \r\n # isinstance(classifier, GridSearchCV):\r\n print 'param', param\r\n N_p = np.prod([len(l) for l in param.values()])\r\n elif isinstance(classifier, RandomizedSearchCV):\r\n N_p = classifier.n_iter\r\n else:\r\n N_p = 1\r\n# is_cv = isinstance(classifier, GridSearchCV) or \\\r\n# isinstance(classifier, RandomizedSearchCV)\r\n# print('Name: {}, ksplit: {}, N_p: {}'.format(name, ksplit, N_p))\r\n if (not parallel) or ksplit <= N_p or \\\r\n (name == \"Random Forest\") or (\"SGD\" in name):\r\n logging.info(\"Attempting to use grid search...\")\r\n classifier.n_jobs = PROCESSORS\r\n classifier.pre_dispatch = 1 # np.floor(PROCESSORS/24)\r\n allConfMats = []\r\n allTotalErrs = []\r\n allFittedClassifiers = []\r\n for i, fold_pair in enumerate(fold_pairs):\r\n confMats = []\r\n totalErrs = []\r\n fitted_classifiers = []\r\n logging.info(\"Classifying a %s the %d-th out of %d folds...\"\r\n % (name, i+1, len(fold_pairs)))\r\n if rank_per_fold is not None:\r\n rankedVars = rank_per_fold[i]\r\n else:\r\n rankedVars = np.arange(data.shape[1])\r\n #\r\n for numVars in numTopVars:\r\n logging.info('Classifying for top %i variables' % numVars)\r\n #\r\n # print 'rankedVars', rankedVars\r\n #\r\n 
confMat, totalErr, fitted_classifier = classify(data[:, rankedVars[:numVars]],\r\n labels,\r\n fold_pair,\r\n classifier)\r\n confMats.append(confMat)\r\n totalErrs.append(totalErr)\r\n fitted_classifiers.append(fitted_classifier)\r\n # recheck the structure of area and fScore variables\r\n allConfMats.append(confMats)\r\n allTotalErrs.append(totalErrs)\r\n allFittedClassifiers.append(fitted_classifiers)\r\n else:\r\n print 'parallel computing going on (debug Sahil ...) ..........................'\r\n #\r\n classifier.n_jobs = PROCESSORS\r\n logging.info(\"Multiprocessing folds for classifier {}.\".format(name))\r\n pool = Pool(processes=min(ksplit, PROCESSORS))\r\n out_list = pool.map(per_split_classifier(data, labels, classifier,\r\n numTopVars),\r\n zip(rank_per_fold, fold_pairs))\r\n pool.close()\r\n pool.join()\r\n #allConfMats = [el[0] for el in out_list]\r\n #allTotalErrs = [el[1] for el in out_list]\r\n #allFittedClassifiers = [el[2] for el in out_list]\r\n allConfMats, allTotalErrs, allFittedClassifiers = tuple(zip(*out_list))\r\n return classifier, allConfMats, allTotalErrs, allFittedClassifiers", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def cv_performance(posTrainData,negTrainData, num_folds):\n length = len(negTrainData)\n splits = split_cv(length, num_folds)\n accuracy_array = []\n for split in splits:\n accuracy = 0\n train_pos = []\n train_neg = []\n test_neg = []\n test_pos = []\n for x in split.train:\n train_pos.append(posTrainData[x])\n train_neg.append(negTrainData[x])\n for x in split.test:\n test_pos.append(posTrainData[x])\n test_neg.append(negTrainData[x])\n nb = Nb(train_pos,train_neg)\n confusion=nb.confusion_matrix(test_pos,test_neg)\n accuracy = nb.accuracy(confusion)\n accuracy_array.append(accuracy)\n\n return accuracy_array", "def trainaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n train_x, train_y, valid_x, valid_y=get_next_train_valid(i)\r\n w,b=svmfit(i,C)\r\n y_predict=predict(train_x,w,b)\r\n train_y=train_y.reshape(len(train_y),1)\r\n k=np.sum(np.absolute(y_predict-train_y))/2\r\n n=len(train_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def cv_5_fold(dataFrame):\n dataframe_collection = {}\n i = 0\n j = 0\n l = 0\n guessed_right = 0\n k = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39]\n\n k_values = []\n # array to store the accuracy evaluation for each number of K\n accuracy_values = {}\n\n myDict = {}\n for j in range(len(k)): # for all values of K neighbour\n\n print(k[j])\n predicted_right = 0\n total_number = 0\n five_accuracies = []\n for i in range(0, 5):\n #aggregating dataframes by fold - e.g. 
1 fold becomes test dataframe; 2,3,4,5 folds become one training dataframe\n trainingDataFrame = dataFrame.loc[dataFrame[15] != (i / 4.00)]\n trainingDataFrame = trainingDataFrame.drop([15], axis=1).reset_index(drop=True)\n testDataFrame = dataFrame.loc[dataFrame[15] == (i / 4.00)]\n testDataFrame = testDataFrame.drop([15], axis=1).reset_index(drop=True)\n\n # output is an array of predicted income values for testDataFrame\n output = knn(trainingDataFrame, testDataFrame, k[j])\n\n # for every fold validation loop calculate the accuracy:\n for instance in range(len(testDataFrame)):\n # checking number of right predictions\n if (output[instance] == testDataFrame[14].iloc[instance]):\n predicted_right += 1.00\n total_number += 1.00\n\n # calculate accuracy as percentage of number of prediction divided by total\n accuracy = (predicted_right / total_number) * 100.0\n # add acccuracies for each of the 5 fold tests to an array\n five_accuracies.append(accuracy)\n\n # PROVIDE FINAL EVALUATION FOR K = J, BY FINDING OUT AVERAGE ACCURACY OF THE FIVE FOLD LOOPS:\n evaluation = 0.0\n for accuracy in range(len(five_accuracies)):\n evaluation += five_accuracies[accuracy]\n\n evaluation = evaluation / 5\n\n accuracy_values.update({k[j]: evaluation})\n\n accuracy_values = collections.OrderedDict(sorted(accuracy_values.items()))\n\n # compute which number of neigbors garners greatest accuracy:\n maxAccuracy = 0\n best_neighbour = 0\n # loop through dictionary values:\n for v in accuracy_values.items():\n # if the value is greater than the current maximum, make it the maximum\n if (v[1] > maxAccuracy):\n maxAccuracy = v[1]\n best_neighbour = v[0]\n\n print(\"Max accuracy \", maxAccuracy)\n print(\"Best Neighbor: \", best_neighbour)\n\n # make a text file containing the K-number and associated accuracy:\n str_x = \"k value | accuracy\" + \"\\n\"\n for k, v in accuracy_values.items():\n str_x += str(k) + \" | \" + str(v) + \"\\n\"\n print(str_x)\n\n text_file = open(\"grid.results.txt\", 'w')\n text_file.write(str_x)\n text_file.close()", "def generate_cross_val_score(clf, data, target, cv):\n return cross_val_score(clf, data, target, cv=cv)", "def objective(params, n_folds=N_FOLDS):\n\n # Keep track of evals\n global ITERATION\n\n ITERATION += 1\n\n # Retrieve the subsample if present otherwise set to 1.0\n subsample = params['boosting_type'].get('subsample', 1.0)\n\n # Extract the boosting type\n params['boosting_type'] = params['boosting_type']['boosting_type']\n params['subsample'] = subsample\n\n # Make sure parameters that need to be integers are integers\n for parameter_name in ['max_depth', 'subsample_for_bin', 'min_child_samples','min_child_weight','num_parallel_tree']:\n params[parameter_name] = int(params[parameter_name])\n\n start = timer()\n\n print('params',params)\n # Perform n_folds cross validation\n cv_results = xgb.cv(params, train_set,\n num_boost_round=3000,\n nfold=n_folds,\n stratified=True,\n early_stopping_rounds=100,\n feval=tpr_weight_funtion_xgb_cv,\n seed=50,\n verbose_eval=True,\n\n )\n\n print('cv_results\\n',type(cv_results),'\\n',cv_results)\n\n run_time = timer() - start\n\n # Extract the best score\n best_score = np.min(cv_results['test-TPR-mean'])\n\n # Loss must be minimized\n loss = best_score\n\n TPR_std = cv_results[cv_results['test-TPR-mean']==best_score]['test-TPR-std'].values[0]\n print('TPR_stdv', TPR_std)\n\n\n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmin(cv_results['test-TPR-mean']) + 1)\n\n # Write to the csv file ('a' means 
append)\n of_connection = open(out_file, 'a')\n writer = csv.writer(of_connection)\n writer.writerow([loss,TPR_std, params, ITERATION, n_estimators, run_time])\n\n # Dictionary with information for evaluation\n return {'loss': loss,'TPR_std':TPR_std, 'params': params, 'iteration': ITERATION,\n 'estimators': n_estimators,\n 'train_time': run_time, 'status': STATUS_OK}", "def nested_cv(X, y, model, n_splits, n_folds, unique_id):\n \n cv = StratifiedKFold(n_splits=n_splits,\n shuffle=True,\n random_state=42) # Outer CV\n \n i_start = 0\n i_list = []\n results_df = None\n cv_path = unique_id + '_NestedCV.pkl'\n \n if os.path.isfile(cv_path) == True: # If CV is incomplete, resume\n results_df = pd.read_pickle(cv_path)\n i_start = results_df.Outer_fold.max() + 1\n print('Resuming cross-validation from fold ' + str(i_start + 1))\n \n # Generate indices to split data by StratifiedKFold\n # Append indices for each fold to list \n for tr_i, te_i in cv.split(X,y):\n i_list.append([tr_i, te_i])\n \n # For each fold...\n for i in range(i_start, len(i_list)):\n results_list = []\n print('Beginning fold ' + str(i+1) + ' of ' + str(len(i_list)))\n \n # Split data into training and test tests\n X_train = X.loc[X.index.intersection(i_list[i][0])]\n y_train = y.loc[y.index.intersection(i_list[i][0])]\n X_test = X.loc[X.index.intersection(i_list[i][1])]\n y_test = y.loc[y.index.intersection(i_list[i][1])]\n\n start = time.time()\n \n # Fit the HyperoptEstimator to training data (optimise model)\n model.fit(X_train,\n y_train,\n n_folds=n_folds, # Inner stratified k-fold CV\n cv_shuffle=True)\n \n end = time.time()\n duration = end - start\n\n # Use optimised model to predict labels for test data\n y_pred = model.predict(X_test)\n score = f1_score(y_test, y_pred, average='weighted') # Evaluate\n \n # Everything below: formats and/or calculates results for output file\n sorted_labels = np.sort(y_test.unique())\n unweighted_score = f1_score(y_test, y_pred,\n average=None,\n labels=sorted_labels)\n c_matrix = confusion_matrix(y_test, y_pred,\n labels=sorted_labels)\n\n for trial in range(len(model.trials.trials)):\n if model.trials.trials[trial].get('result').get('status') == 'ok':\n trial_loss = model.trials.trials[trial].get('result').get('loss')\n trial_duration = model.trials.trials[trial].get('result').get('duration')\n else:\n trial_loss = np.nan\n trial_duration = np.nan\n \n results_list.append([i,\n score,\n unweighted_score,\n le.inverse_transform(sorted_labels),\n c_matrix,\n duration,\n trial,\n trial_loss,\n trial_duration])\n \n append_df = pd.DataFrame(results_list,\n columns=['Outer_fold',\n 'Outer_score',\n 'Outer_unweighted_scores',\n 'Outer_unweighted_score_labels',\n 'Outer_confusion_matrix',\n 'Outer_training_duration',\n 'Trial',\n 'Trial_loss',\n 'Trial_duration'])\n if i == i_start:\n if results_df is not None:\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n else:\n final_df = append_df\n final_df.to_pickle(cv_path)\n \n else:\n results_df = pd.read_pickle(cv_path)\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n final_df.to_pickle(cv_path)", "def score_cv(data, dim, lag, number_of_splits=10, validation_fraction=0.5):\n # we temporarily suppress very short-lived progress bars\n with pyemma.util.contexts.settings(show_progress_bars=False):\n if type(data) == list:\n nval = int(len(data) * validation_fraction)\n elif data._is_reader == True:\n nval = data.number_of_trajectories()\n else:\n raise ValueError(\"data must be list of numpy arrays 
or pyemma reader object\")\n scores = np.zeros(number_of_splits)\n for n in range(number_of_splits):\n if type(data) == list:\n ival = np.random.choice(len(data), size=nval, replace=False)\n elif data._is_reader == True:\n ival = np.random.choice(data.number_of_trajectories(), size=nval, replace=False)\n vamp = coor.vamp(\n [d for i, d in enumerate(data) if i not in ival], lag=lag, dim=dim)\n scores[n] = vamp.score([d for i, d in enumerate(data) if i in ival])\n return scores", "def kfold_scoring(self, data_frame, target, pipeline):\n\n fold_score = []\n macro = ['recall', 'f1', 'precision']\n number_of_folds = -1\n Folds = {}\n\n kf = KFold(n_splits=10, random_state=None, shuffle=True)\n\n for train_index, test_index in kf.split(data_frame):\n X_train = data_frame[train_index]\n X_test = data_frame[test_index]\n y_train = target[train_index]\n y_test = target[test_index]\n number_of_folds = number_of_folds + 1\n # Append the predicted labels.\n y_predict = self.fit_predict_model(X_train, y_train, X_test, pipeline)\n\n Folds[str(number_of_folds)] = {\n \"predicted\": y_predict,\n \"Actual\": y_test\n }\n\n if self.problem_type == 'regression':\n if self.scoring is not None:\n result = self.regression_scoring_function[self.scoring](y_predict, y_test)\n else:\n result = self.regression_scoring_function['r2_score'](y_predict, y_test)\n else:\n if self.scoring is not None:\n if self.scoring not in macro:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test)\n else:\n result = self.classification_scoring_function[self.scoring](\n y_predict, y_test, average='macro')\n else:\n result = self.classification_scoring_function['f1'](\n y_predict, y_test, average='macro')\n\n fold_score.append(result)\n self.pipeline_dict['folds'] = Folds\n return np.mean(fold_score)", "def hyperparameter_cv(X_data, y_data, hyperparameters):\n\n # Create Grid of hyperparameters\n grid = cartesian_product(hyperparameters)\n\n # Loop through hyperparameters \n best_score = 0\n for hyperparameter in grid:\n # Initialize Modle\n model = svm.SVC(kernel='linear', **hyperparameter)\n\n # Train and Get Accuracy\n print(f\"Training using hyperparameters: {hyperparameter}\")\n score = cross_validation_score(X_data, y_data, model, folds=5)\n print(f\"Accuracy Score: {score}\")\n\n if score > best_score:\n best_score = score\n best_parameters = hyperparameter\n \n return best_score, best_parameters", "def run_cv_pred(X, y, clf, n_folds):\n # Construct a kfolds object\n skf = StratifiedKFold(n_splits=n_folds)\n splits = skf.split(X, y)\n y_pred = y.copy()\n\n # Iterate through folds\n for idx, (train_index, test_index) in enumerate(splits):\n X_train, X_test = X[train_index], X[test_index]\n y_train = y[train_index]\n # Initialize a classifier with key word arguments\n clf.fit(X_train, y_train)\n try: # Gradient boosted trees do not accept sparse matrices in the predict function currently\n preds = clf.predict(X_test)\n except TypeError:\n preds = clf.predict(X_test.todense())\n y_pred[test_index] = preds\n\n return y_pred", "def do_crossval():\n df = read_df()\n # X = df['review'].apply(remove_html_lower)\n\n X = df['review']\n y = df['sentiment']\n X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y, random_state=222 )\n\n tfidf = TfidfVectorizer(stop_words='english', min_df=2, max_df=0.8, ngram_range=(1,4))\n stem_pipeline = make_pipeline(TextNormalizer(), tfidf, LogisticRegression(C=100))\n cv = StratifiedShuffleSplit(n_splits=3, 
test_size=0.2)\n\n scores = cross_val_score(stem_pipeline, X_train, y_train, cv=cv, scoring='accuracy', n_jobs=-1)\n print(scores, scores.mean())", "def run_CV(X,y,model,func, n_splits = 3, how = 'up', categorical = 'label_encoder'):\n logloss = []\n skf = StratifiedKFold(n_splits = n_splits, random_state = 144)\n for i, (train_idx, val_idx) in enumerate(skf.split(X,y)):\n X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n\n # # SMOTE\n # X_train = X_train.drop('poor', axis = 1) # drop target\n # cat_columns = X_train.select_dtypes(['object']).columns\n # X_train[cat_columns] = X_train[cat_columns].apply(LabelEncoder().fit_transform)\n # orig_cols = X_train.columns # SMOTE will return a numpy array. Store the column names here to recreate the dataframe for feature engineering/transforms below\n # X_train, y_train = SMOTE().fit_sample(X_train,y_train)\n # # recreate dataframe\n # X_train = pd.DataFrame(X_train, columns = orig_cols)\n\n if how is not None:\n # resample to balance data\n X_resampled = resample_data(X_train, how = how)\n # store the targets now that they are balanced\n y_train = X_resampled['poor']\n # drop target from train\n X_train = X_resampled.drop('poor', axis = 1)\n X_val.drop('poor', axis = 1, inplace = True)\n # print(X_val.columns.values)\n ####### feature engineering goes blow this comment:\n \n func(X_train)\n func(X_val)\n \n ###### end feature eng\n X_train = pre_process_data(X_train, normalize_num='standardize', categorical = categorical)\n assert X_train.shape[0] == y_train.shape[0]\n\n model.fit(X_train, y_train)\n # standardize X_val to predict\n X_val = pre_process_data(X_val,normalize_num= 'standardize', enforce_cols=X_train.columns, categorical = categorical)\n preds = model.predict_proba(X_val)\n \n logloss.append(log_loss(y_val, preds[:,1]))\n \n return logloss", "def score(self, candidate_holder, new_scores):\n cand_seqs = candidate_holder.cand_seqs\n cand_states = candidate_holder.cand_states\n cand_syms = cand_seqs[:, -1]\n\n cand_state_value = []\n cand_score_value = []\n for j in range(cand_states[self.state_index].shape[0]):\n cand_state_value.append(cand_states[self.state_index][j][cand_syms[j]])\n cand_score_value.append(cand_states[self.score_index][j][cand_syms[j]])\n ctc_score_result = []\n ctc_score_total = []\n new_states = []\n for i in tf.range(new_scores.shape[0]):\n num_sym_state = np.array([self.init_state] * self.num_classes)\n num_sym_score = np.array([0.0] * self.num_classes, dtype=np.float32)\n num_sym_score_minus = np.array([0.0] * self.num_classes, dtype=np.float32)\n cand_seq = cand_seqs[i]\n ctc_pre_state = cand_state_value[i]\n top_ctc_candidates = np.argsort(new_scores[i, :])\n top_ctc_candidates = sorted(top_ctc_candidates[-self.ctc_beam :].tolist())\n cand_seq = np.array(cand_seq)\n top_ctc_candidates = np.array(top_ctc_candidates)\n ctc_pre_state = ctc_pre_state.numpy()\n ctc_score, new_state = self.cand_score(\n cand_seq, top_ctc_candidates, ctc_pre_state\n )\n ctc_pre_score = tf.cast(cand_score_value[i], tf.float32)\n ctc_score_minus = self.ctc_weight * (ctc_score - ctc_pre_score) + 500\n\n for k in range(len(top_ctc_candidates)):\n num_sym_score[top_ctc_candidates[k]] = ctc_score[k]\n num_sym_score_minus[top_ctc_candidates[k]] = ctc_score_minus[k]\n num_sym_state[top_ctc_candidates[k]] = new_state[k]\n num_sym_score_minus -= 500\n ctc_score_result.append(num_sym_score_minus)\n ctc_score_total.append(num_sym_score)\n new_states.append(num_sym_state)\n 
cand_states[self.state_index] = tf.convert_to_tensor(np.array(new_states))\n ctc_score_result = tf.convert_to_tensor(np.array(ctc_score_result))\n ctc_score_total = tf.convert_to_tensor(np.array(ctc_score_total))\n cand_states[self.score_index] = ctc_score_total\n return ctc_score_result, cand_states", "def perform_cv_fold(self, algo, fold, folds):\n # TODO: this is only done for hyperparameter optimization and is not\n # part of the OpenML specification. The OpenML specification would\n # like to have the hyperparameter evaluation inside the evaluate\n # performed by the target algorithm itself. Hyperparameter\n # optimization on the other hand needs these both things to be decoupled\n # For being closer to OpenML one could also call evaluate and pass\n # everything else through kwargs.\n if self.task_type != \"Supervised Classification\":\n raise NotImplementedError(self.task_type)\n\n if self.estimation_procedure[\"type\"] != \\\n \"crossvalidation with holdout\":\n raise NotImplementedError(self.estimation_procedure[\"type\"] )\n\n if self.estimation_procedure[\"parameters\"][\"stratified_sampling\"] != \\\n 'true':\n raise NotImplementedError(self.estimation_procedure[\"parameters\"][\"stratified_sampling\"])\n\n if self.evaluation_measure != \"predictive_accuracy\":\n raise NotImplementedError(self.evaluation_measure)\n\n ########################################################################\n # Test folds\n train_indices, test_indices = self.get_train_test_split()\n\n ########################################################################\n # Crossvalidation folds\n train_indices, validation_indices = self.get_validation_split(fold)\n\n X, Y = self.get_dataset()\n\n algo.fit(X[train_indices], Y[train_indices])\n\n predictions = algo.predict(X[validation_indices])\n accuracy = sklearn.metrics.accuracy_score(Y[validation_indices], predictions)\n return accuracy", "def objective(params, n_folds=N_FOLDS):\n\n # Perform n_fold cross validation with hyperparameters\n # Use early stopping and evalute based on ROC AUC\n params['num_leaves'] = int(params['num_leaves'])\n params['min_data_in_leaf'] = int(params['min_data_in_leaf'])\n params['max_bin'] = int(params['max_bin'])\n # params['min_child_samples'] = int(params['min_child_samples'])\n print(params)\n cv_results = lgb.cv(params, train_data, nfold=n_folds, num_boost_round=5000,\n early_stopping_rounds=40, metrics='auc', seed=50)\n print(cv_results)\n save_log(str(params) + \"\\n\\n\\n\\n\\n\" + str(cv_results))\n # Extract the best score\n best_score = max(cv_results['auc-mean'])\n\n # Loss must be minimized\n loss = 1 - best_score\n\n # Dictionary with information for evaluation\n return {'loss': loss, 'params': params, 'status': STATUS_OK}", "def kFoldCrossValidation(self, n_splits ):\n X = self.X\n y = self.y\n\n k_fold = KFold(n_splits)\n model = self.model\n\n for train, test in k_fold.split(X):\n model.fit(X[train], y[train])\n p = model.predict( X[test] )\n # Add line for scores\n\n return model #return scores here?", "def performance_metrics(model, X_train, y_train, X_test, y_test, train=True, cv=True):\n from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score \n from sklearn.metrics import precision_score, recall_score, roc_auc_score\n from sklearn.model_selection import cross_validate, cross_val_score, StratifiedKFold\n scoring = {'acc': 'accuracy',\n 'prec_micro': 'precision_micro',\n 'rec_micro': 'recall_micro',\n 'f1_micro': 'f1_micro',\n 'auc':'roc_auc'} \n if train==True:\n if 
cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_train, y_train, scoring=scoring, cv=kfold)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\") \n elif cv==False:\n scores = cross_validate(model, X_train, y_train, scoring=scoring)\n ypredTrain = model.predict(X_train)\n Acc_train = scores['test_acc'].mean()\n Precision_train = scores['test_prec_micro'].mean()\n Recall_train = scores['test_rec_micro'].mean()\n F1_train = scores['test_f1_micro'].mean()\n AUC_train = scores['test_auc'].mean()\n conf_matrix_train = confusion_matrix(y_train, ypredTrain)\n class_report = classification_report(y_train, ypredTrain)\n print(\"TRAIN:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_train:.2f}\\n\")\n print(f\"CV - Precision: {Precision_train:.2f}\\n\")\n print(f\"CV - Recall: {Recall_train:.2f}\\n\")\n print(f\"CV - F1 score: {F1_train:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_train:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_train}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif train==False:\n if cv==True:\n kfold=StratifiedKFold(n_splits=10, random_state=42)\n scores = cross_validate(model, X_test, y_test, scoring=scoring, cv=kfold)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")\n elif cv==False:\n scores = cross_validate(model, X_test, y_test, scoring=scoring)\n ypredTest = model.predict(X_test)\n Acc_test = scores['test_acc'].mean()\n Precision_test = scores['test_prec_micro'].mean()\n Recall_test = scores['test_rec_micro'].mean()\n F1_test = scores['test_f1_micro'].mean()\n AUC_test = scores['test_auc'].mean()\n conf_matrix_test = confusion_matrix(y_test, ypredTest)\n class_report = classification_report(y_test, ypredTest) \n print(\"TEST:\\n===========================================\")\n print(f\"CV - Accuracy : {Acc_test:.2f}\\n\")\n print(f\"CV - Precision: {Precision_test:.2f}\\n\")\n print(f\"CV - Recall: {Recall_test:.2f}\\n\")\n print(f\"CV - F1 score: {F1_test:.2f}\\n\") \n 
print(f\"CV - AUC score: {AUC_test:.2f}\\n\") \n print(f\"Confusion Matrix:\\n {conf_matrix_test}\\n\")\n print(f\"Classification Report:\\n {class_report}\\n\")", "def objective(params, n_folds = N_FOLDS):\n # Perform n_fold cross validation with hyperparameters\n # Use early stopping and evalute based on ROC AUC\n cv_results = lgb.cv(params, lgb_train, nfold = n_folds, num_boost_round = 10000, early_stopping_rounds = 100, metrics = 'auc', seed = 50)\n # Extract the best score\n best_score = max(cv_results['auc-mean'])\n # Loss must be minimized\n loss = 1 - best_score\n # Dictionary with information for evaluation\n return {'loss': loss, 'params': params, 'status': STATUS_OK}", "def majority_vote():\n iris = datasets.load_iris()\n x_vals, y_vals = iris.data[50:, [1, 2]], iris.target[50:]\n labenc = LabelEncoder()\n y_vals = labenc.fit_transform(y_vals)\n x_train, x_test, y_train, y_test = train_test_split(x_vals, y_vals,\n test_size=0.5, random_state=1)\n\n clf1 = LogisticRegression(penalty='l2', C=0.001, random_state=0)\n clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=0)\n clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')\n pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])\n pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])\n clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']\n\n # Majority Rule (hard) Voting\n mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])\n\n clf_labels += ['Majority Voting']\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n print('10-fold cross validation:\\n')\n for clf, label in zip(all_clf, clf_labels):\n scores = cross_val_score(estimator=clf, X=x_train, y=y_train, cv=10, scoring='roc_auc')\n print(\"ROC AUC: %0.2f (+/- %0.2f) [%s]\" % (scores.mean(), scores.std(), label))\n\n colors = ['black', 'orange', 'blue', 'green']\n linestyles = [':', '--', '-.', '-']\n for clf, label, clr, lin_style in zip(all_clf, clf_labels, colors, linestyles):\n # assuming the label of the positive class is 1\n y_pred = clf.fit(x_train, y_train).predict_proba(x_test)[:, 1]\n fpr, tpr, _ = roc_curve(y_true=y_test, y_score=y_pred)\n print(y_pred)\n roc_auc = auc(x=fpr, y=tpr)\n plt.plot(fpr, tpr, color=clr, linestyle=lin_style,\n label='%s (auc = %0.2f)' % (label, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], linestyle='--', color='gray', linewidth=2)\n\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.grid()\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'roc.png', dpi=300)\n plt.close()\n\n stdc = StandardScaler()\n x_train_std = stdc.fit_transform(x_train)\n all_clf = [pipe1, clf2, pipe3, mv_clf]\n x_min = x_train_std[:, 0].min() - 1\n x_max = x_train_std[:, 0].max() + 1\n y_min = x_train_std[:, 1].min() - 1\n y_max = x_train_std[:, 1].max() + 1\n xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))\n _, axarr = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))\n for idx, clf, ttt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):\n clf.fit(x_train_std, y_train)\n z_vals = clf.predict(np.c_[xxx.ravel(), yyy.ravel()])\n z_vals = z_vals.reshape(xxx.shape)\n axarr[idx[0], idx[1]].contourf(xxx, yyy, z_vals, alpha=0.3)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 0, 0], x_train_std[y_train == 0, 1],\n c='blue', marker='^', s=50)\n axarr[idx[0], idx[1]].scatter(x_train_std[y_train == 1, 0], x_train_std[y_train == 1, 1],\n c='red', 
marker='o', s=50)\n axarr[idx[0], idx[1]].set_title(ttt)\n plt.text(-3.5, -4.5, s='Sepal width [standardized]', ha='center', va='center', fontsize=12)\n plt.text(-10.5, 4.5, s='Petal length [standardized]', ha='center', va='center',\n fontsize=12, rotation=90)\n plt.tight_layout()\n plt.savefig(IMG_PATH + 'voting_panel.png', bbox_inches='tight', dpi=300)\n # print(mv_clf.get_params())\n params = {'decisiontreeclassifier__max_depth': [1, 2],\n 'pipeline-1__clf__C': [0.001, 0.1, 100.0]}\n grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')\n grid.fit(x_train, y_train)\n\n for params, mean_score, scores in grid.cv_results_:\n print(\"%0.3f+/-%0.2f %r\" % (mean_score, scores.std() / 2, params))\n print('Best parameters: %s' % grid.best_params_)\n print('Accuracy: %.2f' % grid.best_score_)", "def cvWorker(epochs, theta, folds, trainFunc, testFunc, report, queue):\n\n # track how many correct predictions are made over all folds with current hyperparams\n totalCorrect = 0;\n totalAttempts = 0;\n for (i,f) in enumerate(folds): \n \n testFold = f;\n trainFold = reduce(operator.add, folds[:i] + folds[i+1:]); # flatten training fold \n \n # learn weights \n w = trainFunc(trainFold, epochs, theta);\n \n # accumulate test accuracy\n [correct, attempts] = testFunc(w, testFold);\n totalCorrect += correct;\n totalAttempts += attempts; \n \n # update based on results and post to queue\n rate = totalCorrect / totalAttempts;\n if not (report == None):\n tabs = '\\t' * report;\n print tabs, 'Cross validation accuracy=', rate, 'for theta=', theta;\n results = (theta, rate); \n queue.put(results)\n \n return;", "def roc_score(x, y, model):\n # We use k-fold cross-validation and average the scores.\n kfold = KFold(n_splits=5)\n scores = []\n for train_index, test_index in kfold.split(x):\n x_train = x[train_index]\n y_train = y[train_index]\n x_test = x[test_index]\n y_test = y[test_index]\n score = roc_auc_score(\n y_test, make_prediction(x_train, y_train, x_test, model))\n scores.append(score)\n return np.mean(scores)", "def train_cv(X_train, Y_train, nfold = 5, early_stopping_rounds = 20):\n # model params\n params = { \"objective\" : \"multiclass\",\n \"num_class\" : 6,\n \"verbosity\" : -1 }\n\n # create dataset for lightgbm\n lgb_train = lgb.Dataset(X_train, Y_train)\n \n # cross validate to find optimal no of iterations\n r = lgb.cv(params, \n lgb_train, \n 10000,\n early_stopping_rounds = early_stopping_rounds,\n nfold = nfold,\n feval = accuracy_error,\n metrics = 'None',\n verbose_eval = True,\n seed = 42)\n\n # Highest score\n r_best = np.max(r['accuracy-mean'])\n\n # best number of estimators\n best_estimators = np.argmax(r['accuracy-mean']) + 1\n print(best_estimators)\n\n print(f'The maxium accuracy on the validation set was {r_best:.5f}')\n print(f'The ideal number of iterations was {best_estimators}.')\n\n # Fit on all of the training data using the ideal number of iterations\n model = lgb.LGBMClassifier(n_estimators=best_estimators, n_jobs = -1,\n **params, random_state = 42) \n model.fit(X_train, Y_train)\n\n return model", "def classify(self, X, y):\n\n clf = svm.SVC(kernel='linear', C=1)\n cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n scores = cross_val_score(clf, X, y, cv=cv, scoring='balanced_accuracy')\n\n return scores", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = 
[]\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def cross_validation_score(X_data, y_data, model, folds=5):\n\n # Shuffle index\n index = np.random.permutation(len(X_data))\n\n fold_size = int(len(X_data) / folds)\n scores = []\n for i in range(folds):\n \n # Partition Data\n X_train, X_val = partition_data(X_data[index], folds, i + 1, fold_size)\n y_train, y_val = partition_data(y_data[index], folds, i + 1, fold_size)\n\n # Train Model\n print(f\"Training on Fold: {i + 1}\")\n model.fit(X_train, y_train)\n\n # Predict Values on Validation Set\n val_pred = model.predict(X_val)\n\n # Get Accuracy\n score = accuracy_score(y_val, val_pred)\n scores.append(score)\n \n return sum(scores) / len(scores)", "def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: {score[i]}')\r\n\r\n i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def __init__(self,\n n_splits_outer=5,\n gridsearch='half',\n n_components=3,\n classifier='svm',\n cv=5,\n min_resources='smallest',\n factor=3,\n score_func=metrics.f1_score,\n average='weighted',\n random_state=None,\n n_jobs=-1,\n **kwargs,\n ):\n\n self.outer_cv = \\\n StratifiedKFold(n_splits=n_splits_outer,\n shuffle=True, # each fold is independent\n random_state=random_state)\n\n super().__init__(gridsearch,\n n_components,\n classifier,\n cv,\n min_resources,\n factor,\n score_func,\n average,\n random_state,\n n_jobs,\n **kwargs,\n )", "def cv_train(dataset, labels, cv=10):\n reg = linear_model.BayesianRidge()\n mae_list = -cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='neg_mean_absolute_error')\n rmse_list = np.sqrt(-cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='neg_mean_squared_error'))\n pc_list = cross_val_score(reg, dataset, labels, cv=cv, n_jobs=-1, scoring='r2')\n\n 
print(mae_list)\n print(rmse_list)\n print(pc_list)\n\n print('=========The Mean Absolute Error of Model is {0}========='.format(np.mean(mae_list)))\n print('=========The Root Mean Square Error of Model is {0}========='.format(np.mean(rmse_list)))\n print('=========The Pearson Correlation of Model is {0}========='.format(np.mean(pc_list)))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, \"./model/BayesRidge_SCUT-FBP.pkl\")\n print('The regression model has been persisted...')", "def HypeNKFoldCV(x,\n group_cols,\n target_col,\n clf,\n nfolds,\n kfolds,\n alpha,\n noise_std,\n scorer):\n ## all indices\n all_idx = x.copy().index.values\n ## will shuffle indices for randomisation\n np.random.shuffle(all_idx)\n ## outer splits indices\n outer_splits = np.array_split(all_idx, nfolds)\n ## scorer results\n scores_val = []\n ## outer cycle\n for i in range(nfolds):\n ## keep `i`-th fold for validation\n val_idx = outer_splits[i]\n x_val = x.loc[val_idx].copy()\n ## choose all but `i`-th split\n inner_idx = np.concatenate(outer_splits[:i] + outer_splits[(i + 1):])\n ## further randomise training indices\n np.random.shuffle(inner_idx)\n ## split others further\n inner_splits = np.array_split(inner_idx, kfolds)\n ## training data frame\n x_train = x.loc[inner_idx].copy()\n ## iterate over group cols\n for group_col in group_cols:\n n_col_name = '_'.join([group_col, target_col])\n ## encode using division into KFolds\n x_train.loc[:, n_col_name] = KFoldTargetEncoding(x_train[[group_col, target_col]].copy(),\n inner_splits,\n group_col,\n target_col,\n n_col_name,\n alpha,\n noise_std)\n ## filling in the same column on val\n ## using whole `x_train`\n x_val.loc[:, n_col_name] = targetEncoding(x_train.loc[:, [group_col, target_col]],\n x_val.loc[:, [group_col]],\n group_col,\n target_col,\n alpha,\n noise_std)\n\n ## will train on x_train\n ## will validate on x_val\n if 'fit' in dir(clf):\n clf.fit(x_train.drop(target_col, axis=1), x_train[target_col])\n preds_val = clf.predict(x_val.drop(target_col, axis=1))\n elif 'train' in dir(clf):\n clf.train(x_train.drop(target_col, axis=1), x_train[target_col])\n preds_val = clf.test(x_val.drop(target_col, axis=1)).argmax(axis=1)\n else:\n raise Exception(\"`clf` must contain either (`fit` and `predict`) or\"\n \" (`train` and `test`) methods\")\n scores_val.append(scorer(x_val[target_col], preds_val))\n del x_val, preds_val, x_train\n return scores_val", "def crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n 
print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))", "def validaccurary(C):\r\n acc=np.zeros((nFolds))\r\n for i in range(0,nFolds):\r\n \r\n train_x, train_y, valid_x, valid_y=get_next_train_valid(i)\r\n w,b=svmfit(i,C)\r\n y_predict=predict(valid_x,w,b)\r\n valid_y=valid_y.reshape(len(valid_y),1)\r\n k=np.sum(np.absolute(y_predict-valid_y))/2\r\n n=len(valid_y)\r\n acc[i]=1-k/n\r\n \r\n accurary=np.mean(acc)\r\n \r\n return accurary", "def cv_rbfsvm(x_data, y_data, n_class, n_feats):\n\n # Do standard scaling to all data (should do train and project unto test)\n sample_scaler = StandardScaler()\n x_scaled = sample_scaler.fit_transform(x_data)\n\n # Grid search CV to search for optimal model parameters C and gamma\n #C_range = np.logspace(-2, 10, 13)\n #gamma_range = np.logspace(-9, 3, 13)\n C_range = np.logspace(-2, 10, 5)\n gamma_range = np.logspace(-9, 3, 5)\n param_grid = dict(gamma = gamma_range, C = C_range)\n # do 80 / 20 - train / test split\n cv = StratifiedShuffleSplit(y_data, n_iter = 3, test_size = 0.2, random_state = 1)\n grid = GridSearchCV(SVC(), param_grid = param_grid, cv = cv)\n grid.fit(x_scaled, y_data)\n\n return grid", "def run_experiment ( X, y, model_call, param_grid = None, scoring_func = accuracy,cv = KFoldStratifiedCV ( number_of_folds = 5 ),):\n\n scores = []\n iteration = 0\n # Iterate through the split\n for train, test in cv.split ( y ):\n # If first iteration and k values are passed, get the best one\n if iteration == 0 and param_grid:\n k = choose_k (\n X [ train ], y [ train ], model_call, param_grid, scoring_func, cv = cv )\n logger.info ( f\"Choosing k= { k } \" )\n else:\n # Defaults to 1 for condensed.\n k = 1\n\n iteration += 1\n\n # Instantiate the model with the value of k\n model = model_call ( k = k )\n\n # Standardize the data\n standardizer = Standardizer ( mean = True, std = True )\n\n # Fit the model\n model.fit ( X = standardizer.fit_transform ( X [ train ] ), y = y [ train ] )\n\n # make test set predictions\n y_pred = model.predict ( X = standardizer.transform ( X [ test ] ) )\n\n # Append the score\n scores.append ( scoring_func ( y [ test ], y_pred ) )\n \n logger.info ( f\"Avg Score: { np.mean ( scores ) } \" )\n \n return model\n # End run_experiment()", "def scoring(self):\n pass", "def cv_training(\n db: audformat.Database,\n partitioning: str,\n features: pd.DataFrame,\n normalization: str,\n root: str\n):\n\n df = db['covid'].df\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n folds = sorted(list(set([x.split('.')[-2] for x in db.tables if f'folds.{partitioning}' in x])))\n\n metrics = {\n 'F1': audmetric.unweighted_average_fscore,\n 'UAR': audmetric.unweighted_average_recall,\n 'ACC': 
audmetric.accuracy\n }\n\n if not os.path.exists(os.path.join(root, 'results.csv')):\n for fold in folds:\n\n def get_fold(db, fold_name):\n df = db[f'folds.{partitioning}.{fold}.{fold_name}'].df\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n return df\n df_train = get_fold(db, 'train')\n df_dev = get_fold(db, 'dev')\n df_test = get_fold(db, 'test')\n\n features = features.fillna(0)\n\n c_params = [\n .0001, \n .0005, \n .001, \n .005, \n .01, \n .05, \n .1, \n .5, \n 1\n ]\n\n steps = []\n if normalization == 'standard':\n # normalization performed on the fly for each fold\n steps.append(('scale', StandardScaler()))\n steps.append(('classify', SVC(kernel='rbf', probability=True)))\n\n max_f1 = 0\n best_c = None\n for c_param in audeer.progress_bar(\n c_params,\n total=len(c_params),\n desc='LOSO',\n disable=True\n ):\n \n clf = Pipeline(steps)\n clf.set_params(**{'classify__C': c_param})\n clf.fit(\n features.loc[df_train.index],\n df_train['covid'],\n )\n pred = clf.predict(features.loc[df_dev.index])\n f1_score = audmetric.unweighted_average_fscore(df_dev['covid'], pred)\n if f1_score > max_f1:\n max_f1 = f1_score\n best_c = c_param\n \n clf.set_params(**{'classify__C': best_c})\n clf.fit(\n features.loc[pd.concat((df_train, df_dev)).index],\n pd.concat((df_train, df_dev))['covid'],\n )\n joblib.dump(\n clf,\n os.path.join(root, f'clf.{fold}.pkl')\n )\n df.loc[df_test.index, 'predictions'] = clf.predict(features.loc[df_test.index])\n df.loc[df_test.index, 'probabilities'] = clf.predict_proba(features.loc[df_test.index])[:, 0]\n \n df.reset_index(inplace=True)\n df.to_csv(os.path.join(root, 'results.csv'), index=False)\n else:\n df = pd.read_csv(os.path.join(root, 'results.csv'))\n\n results = {\n key: metrics[key](df['covid'], df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'results.yaml'), 'w') as fp:\n yaml.dump(results, fp)\n\n file_df = df.groupby('file').apply(\n lambda x: pd.Series({\n 'covid': x['covid'].mode()[0],\n 'predictions': x['predictions'].mode()[0]\n })\n )\n\n results = {\n key: metrics[key](file_df['covid'], file_df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'speaker_results.yaml'), 'w') as fp:\n yaml.dump(results, fp)", "def cv(self, x: pd.Series, y: pd.Series, n_splits: int, refit: bool = True, **fit_kwargs) -> List[list]:\n x = self.text_formatter.fit_transform(x)\n y_one_hot = self.label_encoder.fit_transform(y)\n if y_one_hot.shape[1] == 1:\n y_one_hot = np.hstack((y_one_hot, 1 - y_one_hot))\n skf = StratifiedKFold(n_splits=n_splits)\n scores = []\n for train_index, test_index in skf.split(x, y):\n x_train, x_test = x[train_index], x[test_index]\n y_train, y_test = y_one_hot[train_index], y_one_hot[test_index]\n self._fit(x, y_one_hot, **fit_kwargs)\n results = self.model.evaluate(x_test, y_test)\n scores.append(results)\n if refit:\n self._fit(x, y_one_hot, **fit_kwargs)\n return scores", "def report_cv_stats(n_fold, model, samples, labels, comment=None):\n\n # compute n-fold cross validation accuracy for model\n accuracy = cross_validation.cross_val_score(model, samples, labels, cv=n_fold)\n\n # compute mean and standard deviation\n accuracy_m = accuracy.mean()\n accuracy_s = accuracy.std()\n\n text = \"\"\n if comment:\n text = \"(\" + comment + \")\"\n\n print(\"Accuracy\" + text + \": %0.2f (+/- %0.2f)\" % (accuracy_m * 100, accuracy_s * 100 * 2))\n\n return accuracy_m, 
accuracy_s", "def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results", "def cv_multiclass_fold(Y,num_fold=10):\n\t\n (K,N) = Y.shape\n indices = dict()\n Nk = dict()\n for k in range(K):\n # select indices belonging to class k\n indices[k] = list((Y[k,:]==1).nonzero()[0])\n rand.shuffle(indices[k])\n Nk[k] = len(indices[k])/num_fold\n\t\n index_list = []\n\n for k in range(K):\n for i in range(num_fold-1):\n # split class-k indices into num_fold random sets\n try:\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n except IndexError:\n index_list.append([])\n index_list[i].extend(indices[k][Nk[k]*i:Nk[k]*(i+1)])\n try:\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n except IndexError:\n index_list.append([])\n index_list[num_fold-1].extend(indices[k][Nk[k]*(num_fold-1):])\n\n return index_list", "def viterbi_score(confusion_networks):\n for confusion_network in confusion_networks:\n prev, score = [-infinity] * len(confusion_network), [-infinity] + [0.0] * len(confusion_network)\n for t in range(0, len(confusion_network)): # t: words in the sentence (\"bfs\")\n prev, score = score, prev\n for j in range(0, len(confusion_network[t])): # Iterates deep-first in a CN position (\"dfs\")\n score[j] = max([prev[i] +\n confusion_network[i][j][2]\n for i in range(0, len(confusion_network[t]))])\n return max([score[i] for i in range(1, len(confusion_network[t]))])", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n 
score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def cv(preds_path_stem, num_ensemble=1):\n fold_accs = []\n fold_c_matricies = []\n for fold in range(1, 6):\n data_val = load_dataset(\n f'Data/esc50_mel_wind_tfr/raw/fold_{fold}.tfrecords')\n pred_paths=[f'{preds_path_stem}preds_fold_{i}_{fold}.npy'\n for i in range(1, num_ensemble+1)]\n fold_acc, fold_c_matrix = test_wind_mel_model(pred_paths, data_val)\n fold_accs.append(fold_acc)\n fold_c_matricies.append(fold_c_matrix)\n cv_acc = np.mean(fold_accs)\n cv_acc_std = np.std(fold_accs)\n c_matrix = np.sum(fold_c_matricies, axis=0) / np.sum(fold_c_matricies)\n np.save(f'{preds_path_stem}cmatrix_{num_ensemble}.npy', c_matrix)\n print(f\"The cross validation accuracy is {cv_acc:.4f} \"\n f\"+/- 1.96 * {cv_acc_std:.4f}\")", "def cross_validate(cv, x, y, k=1):\n indices = np.arange(len(x))\n np.random.shuffle(indices)\n stepsize = int(len(x) / cv)\n metrics = np.zeros(4)\n for i in range(cv):\n logging.info(f\"Cross-validation fold {i+1}\")\n\n # Slice test set out of data\n test_indices = indices[i*stepsize:i*stepsize+stepsize]\n x_test = x[test_indices]\n y_test = y[test_indices]\n\n # Everything else is the training set\n x_train = np.copy(x)\n x_train = np.delete(x_train, test_indices, axis=0)\n y_train = np.copy(y)\n y_train = np.delete(y_train, test_indices, axis=0)\n\n metrics += evaluate(knn(x_test, x_train, y_train, k), y_test)\n metrics /= cv\n\n print(metrics)\n return metrics", "def cross_validation(y, tx, k_fold, fit_function, score_function, seed=1, **fit_function_kwargs):\n k_indices = build_k_indices(y, k_fold, seed)\n score_te = 0\n\n for k in range(k_fold):\n te_indices = k_indices[k]\n tr_indices = k_indices[~(np.arange(k_indices.shape[0]) == k)].reshape(-1)\n\n y_te, x_te = y[te_indices], tx[te_indices]\n y_tr, x_tr = y[tr_indices], tx[tr_indices]\n\n w, fit_loss = fit_function(y_tr, x_tr, **fit_function_kwargs)\n score_te += score_function(y_te, x_te, w)\n\n return score_te/k_fold", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def CV_fit(model, data, folds=5, random_state: int=None):\n kf = KFold(n_splits=folds, shuffle=False, random_state=random_state)\n kf = kf.split(X=data[0])\n\n # Fit k models and store them\n results = []\n for train_ids, test_ids in kf:\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n 
desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n fold_model = train(model, train_ids, data, scaler)\n fold_result = test(model, fold_model, test_ids, data, scaler)\n\n results.append(fold_result)\n avg_result = np.mean(results, axis=0)\n return avg_result, results", "def cv_with_entropy(X, Y):\n\t# Decision tree with entropy\n\tclf_entropy = decision_tree_clf()\n\n\t# Returns score\n\tresult = cross_val_score(\n\t\tclf_entropy, X, Y, \n\t\tscoring='f1_macro', \n\t\tcv=StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n\t\t)\n\treturn result", "def cross_validation_score(self, model, x, y, cv, groups):\n losses = []\n for train_idx, test_idx in cv.split(x, y, groups):\n x_tr, x_te = x[train_idx], x[test_idx]\n y_tr, y_te = y[train_idx], y[test_idx]\n\n model.fit(x_tr, y_tr)\n if self.is_classier:\n test_preds = model.predict_proba(x_te)[:, 1]\n else:\n test_preds = model.predict(x_te)[:,]\n loss = self.loss_metric(y_true=y_te, y_pred=test_preds)\n losses.append(loss)\n return np.mean(losses)", "def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV", "def svm_clf_training(max_features, data):\r\n X_train, y_train, X_test, y_test = data\r\n clf = Pipeline([('feature_selection', SelectKBest(score_func=chi2, k=max_features)),\r\n ('clf', svm.SVC(C=1., kernel='linear'))])\r\n\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True) # unigrams and bigrams\r\n X_matrix_tr = vectorizer.fit_transform(X_train)\r\n # parameters = [{'clf__kernel': ['linear'], 'clf__C': [0.1, 1, 10, 100]},\r\n # {'clf__kernel': ['rbf'], 'clf__C': [0.1, 1, 10, 100], 'clf__gamma': [0.001, 0.01, 0.1]},\r\n # {'clf__kernel': ['poly'], 'clf__C': [0.1, 1, 10, 100], 'clf__degree': [2, 3, 4, 5]}]\r\n # clf = GridSearchCV(svc, parameters, scoring='accuracy')\r\n clf.fit(X_matrix_tr, y_train)\r\n # print(\"Best parameters set found on development set:\")\r\n # print()\r\n # print(clf.best_estimator_)\r\n # print()\r\n # print(\"Grid scores on development set:\")\r\n # print()\r\n # for params, mean_score, scores in clf.grid_scores_:\r\n # print(\"%0.3f (+/-%0.03f) for %r\"\r\n # 
% (mean_score, scores.std() / 2, params))\r\n # print()\r\n voc = vectorizer.get_feature_names()\r\n # vectorizer1 = CountVectorizer(ngram_range=(1, 2), lowercase=True, vocabulary=voc)\r\n # X_matrix_val = vectorizer1.fit_transform(X_test)\r\n # y_pred = clf.predict(X_matrix_val)\r\n\r\n # for i in range(len(X_test)):\r\n # if y_test[i] != y_pred[i]:\r\n # print(X_test[i], y_test[i], y_pred[i])\r\n # print(classification_report(y_test, y_pred))\r\n return clf, voc", "def Bayes_prediction(X, y, fold_number=10):\n D = X.shape[1]\n fold = KFold(n_splits=fold_number)\n cross_tab_all = []\n lamb_hat_all = []\n \n for train_index, test_index in fold.split(X, y):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n length = X_train.shape[0]\n pi_hat = y_train.mean()\n lamb_hat = np.zeros((2, D))\n \n for flag in range(2):\n for d in range(D):\n lamb_hat[flag][d] = (sum(X_train.iloc[i][d] * (y_train.iloc[i]==flag) for i in range(length))) / (sum(y_train.iloc[i]==flag for i in range(length)))\n\n y_pred = np.zeros(len(X_test))\n for i in range(len(X_test)):\n y_pred[i] = Bayes_classifier(pi_hat, X_test.iloc[i], lamb_hat)\n \n cross_tab = np.zeros((2, 2))\n for m in [0, 1]:\n for n in [0, 1]:\n cross_tab[m][n] = sum([(y_test.values[i]==m) & (y_pred[i]==n) for i in range(len(y_pred))]) \n \n cross_tab_all.append(cross_tab)\n lamb_hat_all.append(lamb_hat)\n \n cross_tab_all = sum(cross_tab_all)\n lamb_hat_all\n\n return lamb_hat_all, cross_tab_all", "def splitfeatdata(rawdata, fold=10):\n\n labeldata = []\n for row in rawdata:\n\n # if row[2] > 0:\n # label = 'pos'\n # elif row[2] == 0:\n # label = 'neutral'\n # else:\n # label = 'neg'\n\n\n label = row[2]\n labeldata.append((row[4], label))\n\n\n random.shuffle(labeldata)\n\n size = int(math.floor(len(labeldata) / 10.0))\n # train = labeldata[:split]\n # test = labeldata[split:]\n\n # code for k-fold validation referred from:\n # http://stackoverflow.com/questions/16379313/how-to-use-the-a-10-fold-cross-validation-with-naive-bayes-classifier-and-nltk\n claccuracy = []\n for i in range(fold):\n test_this_round = labeldata[i*size:][:size]\n train_this_round = labeldata[:i*size] + labeldata[(i+1)*size:]\n\n acc = myclassifier(train_this_round, test_this_round)\n\n claccuracy.append(acc)\n\n\n\n print os.getcwd()\n\n\n mySentClassifier = nltk.NaiveBayesClassifier.train(labeldata)\n f = open('../../../mySentClassifier2.pickle', 'wb')\n dump(mySentClassifier, f)\n f.close()\n\n\n return claccuracy", "def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": [0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, 
y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def cross_validate(featureFile, nFolds, verbosity = False, percentTData = 1., extype='attribution'):\n oData,aData = importC5(featureFile)\n nAuthors = len(set(aData))\n if extype == 'attribution' and np.mean(Counter(aData).values()) != Counter(aData).values()[0]:\n print('Number of docs per author should be equal in attribution experiment')\n docsPerFold = len(oData) / nFolds\n cMatrix = np.zeros( (nAuthors, nAuthors) )\n\n for N in range(0,nFolds):\n testAuthors = list()\n trainAuthors= list()\n testData = list()\n trainData = list()\n for idv in range(0,len(oData)):\n if (N+idv) % nFolds == 0:\n testData.append(oData[idv])\n testAuthors.append(aData[idv])\n else:\n trainData.append(oData[idv])\n trainAuthors.append(aData[idv]) \n teFile = '%s.cvtest' % (os.path.splitext(featureFile)[0])\n trFile = '%s.cvtrain' % (os.path.splitext(featureFile)[0])\n tAmount = int(round(len(trainAuthors) * percentTData)) # limit training data\n exportFoldFile(testData, testAuthors, teFile)\n exportFoldFile(trainData[0:tAmount], trainAuthors[0:tAmount], trFile)\n predict = classify(trFile, teFile, len(oData[0]))\n if extype != 'attribution':\n cMatrix += confusionMatrix(testAuthors, predict, extype)\n os.remove(teFile)\n os.remove(trFile)\n if percentTData != 1.0: print('Ran CV only with %.f %% (%d docs) of training data.' % (percentTData * 100, tAmount))\n return cMatrix", "def run_rfc():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X,axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define Predictor object to manage nested CV\n rf_predictor = Predictor(\n CVmodel(RandomForestClassifier_skl,[4,8,16,32,64,128], 'max_depth',\n n_estimators=100, n_jobs=-1),\n scorers.accuracy_scorer)\n # cross validate\n rf_cross_validation_scores = \\\n rf_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n logger.info('Random Forest cross-validation = {0:.3f}'.format(\n np.mean(rf_cross_validation_scores)))", "def evaluate_ucf50():\n fv_features = 'fv_ucf50_python/'\n accs = []\n groups, full, sets = utility.split_data(fv_features, suffix='_fv.npy.gz',\n useLooCV=False)\n for i in xrange(5):\n ts = time.time()\n features_train, features_test, labels_train, labels_test = \\\n utility.load_groups(\n groups,np.setdiff1d(full,sets[i]),\n sets[i], scale=False, verbose=False)\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=6)\n clf.fit(features_train, labels_train)\n acc = clf.score(features_test, labels_test)\n print \"Fold %d accuracy: %.3f\" % (i, acc)\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs.append(acc)\n\n with open('fv_ucf50_accs_5fold.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split 
into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def tunning(self, nfolds=5):\n Cs = self.Cs\n param_grid = {'C': Cs}\n grid_search = GridSearchCV(self.model, param_grid, cv=nfolds)\n grid_search.fit(self.train_x, self.train_y)\n \n self.model.C = grid_search.best_params_['C']\n return grid_search.best_params_", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def update_cross_validate_scores(cv_scores):\n # TODO: move this func to cvrun.py (rename cvrun.py utils_cv.py)\n cv_folds = len(list(cv_scores.values())[0])\n\n df = cv_scores_to_df(cv_scores, decimals=3, calc_stats=False)\n\n # Add `metric` col\n v = list(map(lambda x: '_'.join(x.split('_')[1:]), df.index))\n df.insert(loc=0, column='metric', value=v)\n\n # Convert `neg` metric to positive and update metric names (drop `neg_`)\n # scikit-learn.org/stable/modules/model_evaluation.html --> explains the `neg` in `neg_mean_absolute_error`\n idx_bool = [True if 'neg_' in s else False for s in 
df['metric']]\n for i, bl in enumerate(idx_bool):\n if bl:\n df.iloc[i, -cv_folds:] = abs(df.iloc[i, -cv_folds:])\n df['metric'] = df['metric'].map(lambda s: s.split('neg_')[-1] if 'neg_' in s else s)\n\n # Add `tr_set` col\n v = list(map(lambda x: True if 'train' in x else False, df.index))\n df.insert(loc=1, column='tr_set', value=v)\n return df", "def randomized_search_cv(self, estimator, split): # Nested cv\n train_idx, test_idx = split\n x_train_matrix = copy.deepcopy(self.x_matrix.iloc[train_idx])\n y_train_vector = copy.deepcopy(self.y_vector.iloc[train_idx])\n x_test_matrix = copy.deepcopy(self.x_matrix.iloc[test_idx])\n y_test_vector = copy.deepcopy(self.y_vector.iloc[test_idx])\n\n estimator.fit(x_train_matrix, y_train_vector)\n\n y_test_scores = estimator.predict_proba(x_test_matrix)[:, 1]\n auc = [\n roc_auc_score(y_test_vector, y_test_scores),\n roc_curve(y_test_vector, y_test_scores)\n ]\n\n if isinstance(estimator, RandomizedSearchCV):\n training_report = dict(\n Scorer=estimator.scorer_,\n Params=estimator.get_params(),\n Best_params=estimator.best_params_,\n Best_score=estimator.best_score_,\n Best_index=estimator.best_index_,\n Cross_validations=estimator.cv_results_,\n Best_estimator=estimator.best_estimator_,\n Estimator_score=estimator.score(x_test_matrix, y_test_vector)\n )\n\n estimator = estimator.best_estimator_\n else:\n training_report = None\n\n # XXX: need update if use more estimator\n first_k_name = x_train_matrix.columns\n first_k_importance = estimator.steps[-1][-1].feature_importances_\n feature_importance = {\n name: importance\n for name, importance in zip(first_k_name, first_k_importance)\n }\n\n return (training_report, auc, feature_importance, estimator)", "def evaluate_num_centres(\n inputs, targets, folds, scale, reg_param, num_centres_sequence=None):\n # fix the reg_param\n reg_param = 0.01\n # fix the scale\n scale = 100\n # choose a range of numbers of centres\n if num_centres_sequence is None:\n num_centres_sequence = np.arange(5,200)\n num_values = num_centres_sequence.size\n print(num_values)\n num_folds = len(folds)\n #\n # create some arrays to store results\n train_mean_errors = np.zeros(num_values)\n test_mean_errors = np.zeros(num_values)\n train_stdev_errors = np.zeros(num_values)\n test_stdev_errors = np.zeros(num_values)\n # \n # run the experiments\n for c, num_centres in enumerate(num_centres_sequence):\n centres = np.linspace(0,1,num_centres)\n feature_mapping = construct_rbf_feature_mapping(centres,scale)\n designmtx = feature_mapping(inputs)\n # r is the index of reg_param, reg_param is the regularisation parameter\n # cross validate with this regularisation parameter\n train_errors, test_errors = cv_evaluation_linear_model(\n designmtx, targets, folds, reg_param=reg_param)\n # we're interested in the average (mean) training and testing errors\n train_mean_error = np.mean(train_errors)\n test_mean_error = np.mean(test_errors)\n train_stdev_error = np.std(train_errors)\n test_stdev_error = np.std(test_errors)\n # store the results\n train_mean_errors[c] = train_mean_error\n test_mean_errors[c] = test_mean_error\n train_stdev_errors[c] = train_stdev_error\n test_stdev_errors[c] = test_stdev_error\n #\n # Now plot the results\n fig, ax = plot_train_test_errors(\n \"Num. Centres\", num_centres_sequence, train_mean_errors, test_mean_errors)\n # Here we plot the error ranges too: mean plus/minus 1 standard error.\n # 1 standard error is the standard deviation divided by sqrt(n) where\n # n is the number of samples. 
\n # (There are other choices for error bars.)\n # train error bars\n lower = train_mean_errors - train_stdev_errors/np.sqrt(num_folds)\n upper = train_mean_errors + train_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='b')\n # test error bars\n lower = test_mean_errors - test_stdev_errors/np.sqrt(num_folds)\n upper = test_mean_errors + test_stdev_errors/np.sqrt(num_folds)\n ax.fill_between(num_centres_sequence, lower, upper, alpha=0.2, color='r')\n\n return train_mean_errors", "def score_classifier(\n X,\n y,\n clf,\n nfeats=None,\n scoring=default_scorers,\n score_aggreg=default_score_aggreg,\n scale=None,\n decompose=None,\n select=None,\n decompose_params={},\n nfolds=10,\n shuffle=True,\n random_fold_state=None,\n include_train_stats=False,\n):\n # give scoring and score_aggreg elements some names\n scoring = scoring or default_scorers\n scoring = mk_scoring_dict(scoring)\n score_aggreg = score_aggreg or default_score_aggreg\n score_aggreg = mk_score_aggreg_dict(score_aggreg)\n\n if nfeats is None:\n nfeats = np.shape(X)[1]\n\n # X = X[:, :nfeats]\n\n stratified_k_fold = StratifiedKFold(\n y, n_folds=nfolds, shuffle=shuffle, random_state=random_fold_state\n )\n score_info = list()\n for train, test in stratified_k_fold:\n d = dict()\n\n X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]\n\n if include_train_stats:\n d['train_pts'] = np.shape(X_train)[0]\n d['train_nfeats'] = np.shape(X_train)[1]\n\n pipeline_steps = list()\n if scale: # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()\n pipeline_steps.append(('scale', scale))\n if decompose:\n pipeline_steps.append(('decompose', decompose))\n if select:\n pipeline_steps.append(('select', feature_selection.SelectKBest(k=nfeats)))\n else:\n X = X[:, :nfeats]\n\n pipeline_steps.append(('clf', clf))\n\n pipeline = Pipeline(steps=pipeline_steps)\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n\n for score_name, score_fun in scoring.items():\n d[score_name] = score_fun(y_test, y_pred)\n score_info.append(d)\n\n # return score_info\n score_info = pd.DataFrame(score_info)\n score_result = pd.Series()\n for score_aggreg_name, score_aggreg_fun in score_aggreg.items():\n t = score_info.apply(score_aggreg_fun)\n t.set_axis(\n axis=0,\n labels=[\n mk_aggreg_score_name(score_aggreg_name, score_name)\n for score_name in t.index.values\n ],\n )\n score_result = score_result.append(t)\n\n return score_result", "def score(self, data):\n\n score_mappings = {\n \"0\": np.log(self.class_zero_doc_count / self.total_docs),\n \"1\": np.log(self.class_one_doc_count / self.total_docs)\n }\n\n features = self.featurize(data)\n\n for f in features:\n\n if(f[0] in self.class_zero):\n cond_prob_zero = np.log((self.class_zero[f[0]] + 1) / (self.class_zero_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_zero = np.log(1 / (self.class_zero_feature_count + len(self.vocab)))\n else:\n cond_prob_zero = 0\n\n if(f[0] in self.class_one):\n cond_prob_one = np.log((self.class_one[f[0]] + 1) / (self.class_one_feature_count + len(self.vocab)))\n elif(f[0] in self.vocab):\n cond_prob_one = np.log(1 / (self.class_one_feature_count + len(self.vocab)))\n else:\n cond_prob_one = 0\n\n score_mappings[\"0\"] += cond_prob_zero\n score_mappings[\"1\"] += cond_prob_one\n\n score_mappings[\"0\"] = np.exp(score_mappings[\"0\"])\n score_mappings[\"1\"] = np.exp(score_mappings[\"1\"])\n\n return score_mappings", "def svc(n_components=10):\n\n train = 
pandas.read_csv('train.csv')\n y = train['target'].values\n X = raw_scaled_features(train)\n\n folds = StratifiedKFold(train['target'], 10)\n \n for train_indices, test_indices in folds:\n #print train_indices, test_indices\n X_train = X[train_indices]\n y_train = y[train_indices]\n X_test = X[test_indices]\n y_test = y[test_indices]\n\n pca = PCA(n_components=n_components)\n X_train = pca.fit_transform(X_train)\n X_test = pca.transform(X_test)\n #print X_train.shape\n\n svc = SVC(probability=True, verbose=False)\n svc.fit(X_train, y_train)\n y_prob = svc.predict_proba(X_test)\n\n print log_loss(y_test, y_prob, svc.classes_)", "def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2", "def build_model(): \n \n \n pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),('tfidf', TfidfTransformer()),\n ('MLC', MultiOutputClassifier(KNeighborsClassifier()))])\n \n parameters = {'MLC__estimator__n_neighbors': [3,5],'MLC__estimator__leaf_size':[10,20,30] }\n custom_recall = make_scorer(recall_score,average='weighted')\n\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs = -1, verbose=2)\n\n\n return cv", "def cross_validation(self, X, y, n_folds=5, shuffle=True, evaluation_metric='top30'):\n # WE DON'T USE THIS\n # We use basic train-test split to evaluate or models as a first approach\n # We will then use CV for searching the best parameters via random search\n pass", "def print_score_from_restored_model(clf, X_test, y_test):\n y_predicted = clf.predict(X_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n print(\"FNC-1 score from restored model: \" + str(score) +\"\\n\")\n\n return score", "def cv_lsvm(x_data, y_data, n_class, n_feats):\n\n # Do standard scaling to all data (should do train and project unto test)\n #sample_scaler = StandardScaler()\n #x_scaled = sample_scaler.fit_transform(x_data)\n\n # Grid search CV to search for optimal model parameter C\n #C_range = np.logspace(-2, 10, 13)\n C_range = np.logspace(-2, 10, 5)\n param_grid = dict(C = C_range)\n cv = StratifiedShuffleSplit(y_data, n_iter = 3, 
test_size = 0.2, random_state = 0)\n grid = GridSearchCV(SVC(kernel = \"linear\"), param_grid = param_grid, cv = cv)\n grid.fit(x_data, y_data)\n\n return grid", "def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print '*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)", "def random_objective(params, iteration, n_folds = N_FOLDS):\n\n start = timer()\n \n # Perform n_folds cross validation\n cv_results = lgb.cv(params, train_set, num_boost_round = 10000, nfold = n_folds, \n early_stopping_rounds = 100, metrics = 'l2', seed = 50, stratified=False)\n end = timer()\n best_score = np.max(cv_results['l2-mean'])\n \n # Loss must be minimized\n loss = 1 - best_score\n \n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmax(cv_results['l2-mean']) + 1)\n \n # Return list of results\n return [loss, params, iteration, n_estimators, end - start]", "def CV_fit(model, data, datasets, folds=5, random_state: int=None):\n kf = KFold(n_splits=folds, shuffle=False, random_state=random_state)\n kf = kf.split(X=data[0])\n\n # Fit k models and store them\n results = []\n for train_ids, test_ids in kf:\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n fold_model = train(model, train_ids, data, scaler, datasets)\n fold_result = test(model, fold_model, test_ids, data, scaler)\n\n results.append(fold_result)\n avg_result = np.mean(results, axis=0)\n return avg_result, results", "def k_fold_linear(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n\n test = temp.pop(i)\n train = pd.concat(temp)\n test_labels = list(test['Labels'])\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n test_predictions = [round(x, 1) for x in predict_linear_regression(test.drop(['Labels'], axis=1), model)]\n train_predictions = [round(x, 1) for x in predict_linear_regression(train.drop(['Labels'], axis=1), model)]\n\n Confusion_Matrix(test_predictions, test_labels)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)", "def ensemble(scores):\r\n c = Counter ()\r\n for probs in zip (scores):\r\n idx = int (np.argmax (np.array (probs)))\r\n c.update ([idx])\r\n best = c.most_common (1)[0][0]\r\n return best", "def multi_RSCV(method, grid, X, Y, metric, n_candidates, it):\r\n \r\n params_box = [None] * n_candidates\r\n metrics_box = 
pd.DataFrame(np.zeros((n_candidates, 1)), columns = list(['Score']))\r\n \r\n cv_KF = StratifiedKFold(n_splits = 5, shuffle = True)\r\n \r\n for i in range(n_candidates):\r\n seed_temp = math.ceil(random.uniform(1,1000))\r\n model = RandomizedSearchCV(method, grid, n_iter = it, cv = cv_KF, n_jobs = -1, scoring = metric, random_state = seed_temp) \r\n model.fit(X,Y)\r\n params_box[i] = model.best_params_\r\n metrics_box.iloc[i,0] = model.best_score_\r\n \r\n return params_box, metrics_box", "def cross_validate(pipeline, data, cv=4):\n print \"Running cross validation...\"\n (Xcv, ycv) = data\n kfold = KFold(n_splits=cv, shuffle=True, random_state=42)\n results = []\n for train_idx, val_idx in kfold.split(Xtrain):\n pipeline.fit(Xcv[train_idx], ycv[train_idx])\n results.append(accuracy_score(\n ycv[val_idx], pipeline.predict(Xcv[val_idx])\n ))\n print \"{} +/- {}\".format(np.mean(results), np.std(results))", "def cross_validation(feature_train, help_rank_train, model_name):\n clf = svm.SVC(kernel='linear', C=1).fit(feature_train, help_rank_train)\n clf_model = open(model_name,'wb')\n dump(clf, clf_model, -1)\n return", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def solve(cv, cv_fit, ask):\n cv_trans = cv.transform(ask)\n tfidf = TfidfTransformer(use_idf=True,\n sublinear_tf=True).fit(cv_fit)\n tfidf_fit = tfidf.transform(cv_fit).toarray()\n tfidf_trans = tfidf.transform(cv_trans).toarray()\n tfbook = []\n for i in range(len(tfidf_fit)):\n sim_1 = 1 - spatial.distance.cosine(tfidf_trans[0], tfidf_fit[i])\n tfbook.append(sim_1)\n np.array(tfbook)\n index = np.argmax(tfbook)\n return index, tfbook[index]", "def cross_validation_accuracy(clf, X, labels, k):\n ###TODO\n\n cv = KFold(n=len(labels),n_folds=k)\n accuracies = []\n\n \n for train_indices, test_indices in cv:\n \n clf.fit(X[train_indices], labels[train_indices])\n predicted = clf.predict(X[test_indices])\n acc = accuracy_score(labels[test_indices], predicted)\n accuracies.append(acc)\n \n #print('accuracies = ',accuracies) \n #avg = np.mean(accuracies,dtype=np.float64)\n return(np.mean(accuracies,dtype=np.float64))", "def __init__(self, C, seed=None, n_folds=5, tol=0.1):\n\t\tself.rng = check_random_state(seed)\n\t\tself.C = C \n\t\tself.w = None\n\t\tself.tol = tol \n\t\tself.n_folds = n_folds # number of folds for cross validation", "def run_svm(data):\n X, y = data.ix[:, :-1], data.ix[:, -1]\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size = int(TRAIN_PCT * len(X)))\n\n clf = SVC()\n\n param_grid = {\n 'C': [10 ** k for k in range(-3, 3)],\n # 'C': range(1, 11),\n 'kernel': ['linear', 'poly', 'rbf']}\n # 'kernel': ['rbf']}\n\n grid_results = GridSearchCV(clf, param_grid,\n scoring='roc_auc',\n cv=StratifiedKFold(y_train, n_folds=20),\n verbose=1)\n\n grid_results.fit(X_train, y_train)\n\n print '\\ngenlzn errors:'\n for params, mean_score, all_scores in grid_results.grid_scores_:\n print '{}\\t{}\\t(+/-) {}'.format(\n params,\n round(mean_score, 3),\n round(all_scores.std() / 2, 3))\n\n print '\\nbest model:'\n print '{}\\t{}'.format(grid_results.best_params_,\n round(grid_results.best_score_, 3))\n\n print '\\nclassification report:'\n print classification_report(y_test, 
grid_results.predict(X_test))\n\n print 'confusion matrix ({} total test recs, {} positive)'.format(\n len(y_test), sum(y_test))\n print confusion_matrix(y_test, grid_results.predict(X_test),\n labels=[1, 0])\n print", "def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))", "def cv_reweighting(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n \n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = train_test_split(Xtr, Str, test_size=prop)\n\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gamma = 'scale',probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn initial probability dset:\", dset)\n clf1.fit(X_train, y_train)\n return clf1.score(Xts, Yts)\n if run == 1:\n print(\"calculating weighting dset:\", dset)\n\n probS = clf1.predict_proba(X_train)\n weights = estimateBeta(y_train, probS, 0.2, 0.4)\n\n for i in range(len(weights)):\n if weights[i] < 0:\n weights[i] = 0.0\n if run == 1:\n print(\"fit final model dset:\", dset)\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, C=0.8, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, C=.4, max_iter=max_itera)\n\n clf2.fit(X_train, y_train, sample_weight=weights)\n\n return clf2.score(Xts, Yts)", "def inference(train_X, test_X, y, model_params=None, folds=10):\n\n y_oof = np.zeros(train_X.shape[0]) # oof pred\n test_preds = np.zeros(test_X.shape[0]) # kfold pred\n score = 0 # average of kfold(AUC score)\n \n # -- Stratified KFold\n skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=444)\n for fold, (train_idx, valid_idx) in enumerate(skf.split(train_X, y)):\n X_train, X_valid = train_X.loc[train_idx, :], train_X.loc[valid_idx, :]\n y_train, y_valid = y[train_idx], y[valid_idx]\n \n X_train = X_train.to_numpy()\n X_valid = X_valid.to_numpy()\n y_train = y_train.to_numpy()\n y_valid = y_valid.to_numpy()\n\n X_test = test_X.to_numpy()\n\n print(f'fold: {fold+1}, X_train.shape: {X_train.shape}, X_valid.shape: {X_valid.shape}')\n\n # -- Catboost, train\n clf = CatBoostClassifier(**model_params) \n clf.fit(\n X_train, y_train, \n eval_set=(X_valid, y_valid),\n early_stopping_rounds=50, \n verbose=20\n )\n\n # -- Prediction/Validation/Scoring\n valid_preds = clf.predict_proba(X_valid)[:, 1]\n y_oof[valid_idx] = valid_preds\n\n print(f\"Fold {fold + 1} | AUC: {roc_auc_score(y_valid, valid_preds)}\")\n print('-'*80)\n\n score += roc_auc_score(y_valid, valid_preds) / folds\n test_preds += clf.predict_proba(X_test)[:, 1] / folds\n \n del X_train, X_valid, y_train, y_valid\n gc.collect()\n \n print(f\"\\nMean AUC = {score}\") # validation score\n print(f\"OOF AUC = {roc_auc_score(y, y_oof)}\") # oof validation\n \n return y_oof, test_preds" ]
[ "0.7361523", "0.6822073", "0.67983013", "0.67981595", "0.679089", "0.6743333", "0.661887", "0.6546391", "0.6534486", "0.64706314", "0.6468548", "0.6424231", "0.6401285", "0.6376128", "0.6340506", "0.6315139", "0.6280791", "0.6203428", "0.61945695", "0.61933863", "0.6171417", "0.6163645", "0.6154589", "0.6138037", "0.6118767", "0.61068034", "0.6090564", "0.6066773", "0.6065875", "0.60653687", "0.6064921", "0.6060831", "0.605588", "0.6047692", "0.6041726", "0.6024063", "0.60092556", "0.60085124", "0.59948444", "0.597549", "0.59556776", "0.5955542", "0.5952689", "0.595187", "0.5942775", "0.5941844", "0.59399605", "0.5936132", "0.59290344", "0.5923429", "0.5918508", "0.5916552", "0.5911631", "0.58961016", "0.58923817", "0.58778757", "0.58712226", "0.5870998", "0.586894", "0.58684355", "0.58640623", "0.5860812", "0.58557487", "0.58516043", "0.5847695", "0.58461356", "0.5843811", "0.5840511", "0.5830971", "0.5827945", "0.58060056", "0.5804077", "0.57689637", "0.57672423", "0.5761345", "0.575426", "0.57404", "0.5739786", "0.57391816", "0.5730022", "0.57239884", "0.5706512", "0.57055354", "0.5702058", "0.56930304", "0.5684835", "0.56817746", "0.5674007", "0.56730145", "0.5671934", "0.5671617", "0.5670291", "0.5668925", "0.5658045", "0.56561804", "0.56517094", "0.5649137", "0.564605", "0.5645227" ]
0.6448152
11
Fit the logistic regression model.
def _fit(self):
    clf = LogisticRegression()
    clf.fit(inputs, labels)
    return clf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_reg(x_train, y_train):\n\n log_reg_classifier = LogisticRegression(max_iter=1000, solver='lbfgs')\n log_reg_classifier.fit(x_train, y_train)\n return log_reg_classifier\n\n # log_reg_classifier.fit(x_train, y_train)", "def _fit(self, _X, _y):\n\n self.model = linear_model.LogisticRegression(penalty=self.penalty, random_state=self.seed,\n solver='saga', n_jobs=self.n_jobs)\n self.model.fit(_X, _y)", "def fit(self, x, y):\n # Note Logistic Regression Runtime\n start_time = time.time()\n\n # Converting Pandas DataFrame to Numpy arrays\n if not type(x).__module__ == np.__name__:\n x = x.to_numpy()\n if not type(y).__module__ == np.__name__:\n y = y.to_numpy()\n\n # Insert a column of 1 in the feature vector X for the bias term in the weights\n x = np.insert(x,0,1,axis=1)\n \n # Verify dimension of input\n if len(x) != len(y):\n print(\"The number of input features vector must be to be the same as the number of target variables\")\n else:\n losses = self.gradient_descent(x,y)\n\n # Note end time\n end_time = time.time()\n\n # Log runtime\n print(\"Logistic Regression training time: {0:.2f}s\".format(end_time - start_time))\n \n return losses", "def train_logistic_regression(train_x, train_y):\n\n logistic_regression_model = LogisticRegression(penalty='l2', C=1.0)\n logistic_regression_model.fit(train_x, train_y)\n return logistic_regression_model", "def train_logistic_regression(X_train_input, y_train_input, C=1):\r\n from sklearn.linear_model import LogisticRegression\r\n logr_clf = LogisticRegression(C=C)\r\n logr_clf.fit(X_train_input, y_train_input)\r\n return logr_clf", "def fit_logistic_regression():\n\n logger.debug(\"Running the fit_logistic_regression function now\")\n\n #Loading the configuration\n with open(os.path.join(\"config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Loading and pre processing the data\n logger.debug(\"Loading and pre processing the data\")\n train_df = load_data(config[\"load_data\"][\"train_file\"])\n train_df = pre_process_data(train_df, resample = True, resample_count = 500000)\n\n #Defining Pipeline\n pipeline = Pipeline([\n ('tfidf', TfidfVectorizer(analyzer='word', token_pattern=r'[A-Za-z0-9@-]+')),\n ('model', LogisticRegression(random_state=12345, verbose = 1, solver = 'saga')),\n ])\n\n #Defining parameters to vary\n parameters = {\n 'tfidf__max_df': (0.25, 0.5, 0.75),\n 'tfidf__max_features': (None, 5000, 10000, 50000),\n 'tfidf__ngram_range': ((1, 1), (1, 2)),\n 'model__C': (0.01, 1, 100)\n }\n\n scoring_list = [\"accuracy\", \"f1\", \"precision\", \"recall\", \"roc_auc\"]\n \n #Performing 5fold CV to determine best hyperparameters\n model = GridSearchCV(pipeline, parameters, cv=5,\n n_jobs=-1, verbose=1, scoring=scoring_list, refit='f1',)\n\n t0 = datetime.datetime.now()\n\n model.fit(train_df[\"Review\"].tolist(), train_df[\"Ratings\"].to_numpy())\n \n logger.info(\"Grid Search performed in {}\".format(str(datetime.datetime.now()-t0)))\n\n #Saving results\n res_df = pd.DataFrame(model.cv_results_)\n res_df.to_csv(os.path.join(config[\"summary_stats\"][\"save_location\"], \"LogisticRegressionResults.csv\"))\n \n #Saving the model\n pickle.dump(model, open(os.path.join(config[\"models\"][\"save_location\"], \"LogisticRegression.pkl\"),'wb'))\n\n return", "def train_logistic_regression(x_train, y_train, learning_rate, fit_intercept=False, max_iter=500):\r\n if fit_intercept:\r\n intercept = np.ones(x_train.shape[0], 1)\r\n x_train = np.hstack((intercept, x_train)) # hstacks merges 2 arrays column wise\r\n weights = 
np.zeros(x_train.shape[1])\r\n for iteration in range(max_iter):\r\n weights = update_weights(x_train, y_train, weights, learning_rate)\r\n # printing cost for every 100 iterations\r\n if iteration % 100 == 0:\r\n print(calculate_cost(x_train, y_train, weights))\r\n return weights", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def logistic_fit(self, penalty: str = 'l2', c: float = 1.0):\r\n self.LogisticModel = LogisticRegression(solver='liblinear', penalty=penalty, C=c).fit(self.x, self.y)", "def test_train_logist(x_train_variable, y_train_dep):\n # Ensure the function works\n try:\n lrc = cls.train_logistic(x_train_variable, y_train_dep)\n logging.info(\"Successful Logistic Model\")\n except Exception as err:\n logging.error(\"Errors in Fitting the Logistic Regression\")\n raise err\n return lrc", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n return least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=logistic_loss, gradient=logistic_grad)", "def run_logistic(X_train, X_test, y_train, y_test, C=1, penalty = 'l2', solver = 'lbfgs'):\n \n logreg = LogisticRegression(fit_intercept=True, C=C, penalty = penalty, solver = solver)\n logreg.fit(X_train, y_train)\n get_scores(logreg, X_train, X_test, y_train, y_test)", "def LogisticRegression_sklearn(X_train, X_test, y_train, y_test):\n\n\tlog_reg = LogisticRegression()\n\tlog_reg.fit(X_train, y_train.ravel())\n\tyPred =log_reg.predict(X_test)\n\n\t#Printing metrics of the logistic regression model\n\tprint('Accuracy:', metrics.accuracy_score(y_test, yPred))\n\tprint('Precision:', metrics.precision_score(y_test, yPred))\n\tprint('Recall', metrics.recall_score(y_test, yPred))\n\n\t#confusion matrix\n\n\tconfusionMatrix = matrix.confusion_matrix(y_test, yPred)\n\tsb.heatmap(pd.DataFrame(confusionMatrix), annot= True, fmt='g')\n\tplt.title('Confustion matrix with default value 1')\n\tplt.ylabel('True values')\n\tplt.xlabel('Predicted values')\n\tplt.show()", "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n\n return f, df, y", "def run_logistic_regression(training, testing, feature_cols, outcome_col):\n if 'intercept' not in training.columns:\n training['intercept'] = 1\n if 'intercept' not in testing.columns:\n testing['intercept'] = 1\n intercept_feature_cols = feature_cols + ['intercept']\n logit = sm.Logit(training[outcome_col], training[intercept_feature_cols])\n fitted_logit_model = logit.fit()\n logit_diagnostics = get_diagnostics(testing[outcome_col], testing[intercept_feature_cols], fitted_logit_model, model_type = 'logit')\n predicted_logit_probs = fitted_logit_model.predict(testing[intercept_feature_cols])\n\n return fitted_logit_model, logit_diagnostics, predicted_logit_probs", "def fit_and_predict_LR(X_train, Y_train, X_test):\n\n # Import the package\n from sklearn.linear_model import LogisticRegression\n\n #referenced to sklearn documentation \n \n # fit the model... 
\n clf = LogisticRegression().fit(X_train, Y_train) \n # make predictions \n predicted_LR = clf.predict(X_test)\n return predicted_LR\n ### END SOLUTION ### ", "def sklearn_model(train_data):\n X, y = train_data\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = LogisticRegression(\n multi_class=\"multinomial\", solver=\"lbfgs\", max_iter=1000\n )\n model.fit(X, y)\n return model", "def fit_data(self, data):\n d = log(-log(data))\n return dot(d, self._fit_matrix.T)", "def fit(self, X, y):\n if self._intercept:\n X = self._add_intercept(X)\n\n if self._theta is None or (self._theta and self._theta.shape != X.shape[1]):\n self._theta = np.zeros(X.shape[1]) # Initialize parameters\n\n for n in range(self._n_iter):\n h = sigmoid(np.dot(X, self._theta))\n dW = np.dot(X.T, (y-h))\n self._theta += self._lr * dW\n if (n % (self._n_iter/10) == 0): # Print every 10% of total training iterations\n print(\"Train Accuracy: \", binary_accuracy(y, h)) \n print(\"Train Loss: \", binary_cross_entropy(y, h))", "def logistic_predict(self, x: np.array) -> np.array:\r\n if self.LogisticModel is None:\r\n print('Logistic Model not trained, please run logistic_fit first!')\r\n return None\r\n else:\r\n return self.LogisticModel.predict(x)", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def fit(self, X):", "def train_logisticRegression(data: np.array, labels: np.array)->None:\n\n n_examples = np.size(data, 0)\n n_features = np.size(data, 1)\n n_categories = np.size(labels, 1)\n\n data = np.hstack((np.ones((n_examples, 1)), data))\n\n print(data[0:5, :])\n\n X_train, X_test, y_train, y_test, idx_test = split_data(data, labels, 0.7)\n\n convergence_goal = 1e-3\n learning_rate = 0.01\n\n theta = np.random.uniform(size=((n_features+1, n_categories)))\n\n for i in range(n_categories):\n\n cost_var = 1\n\n previous_cost = 1e6\n iterations = 0\n cost_to_plot = []\n\n while cost_var > convergence_goal:\n iterations += 1\n cost, grad = costFunction(X_train, y_train[:, i], theta[:, i])\n theta[:, i] = update_theta(theta[:, i], grad, learning_rate)\n cost_var = previous_cost - cost\n previous_cost = cost\n if iterations == 1: cost_var = 1\n cost_to_plot.append(cost)\n # print(cost)\n\n plt.plot(range(iterations), cost_to_plot, 'g-', label = 'cost')\n plt.xlabel('iterations')\n plt.ylabel('cost')\n # plt.show()\n\n predictions = lrPredict(theta, X_test)\n\n print(predictions[0:5, :])\n print(y_test[0:5, :])\n\n accuracy = np.mean([p == l for p, l in zip(predictions, y_test)])\n print(\"Accuracy = {}\".format(accuracy))\n\n pass", "def fit(self, X,y):\n pass", "def fit(self, X, y, max_iter=MAX_ITER):\n X = self.normalize_data(X)\n X = self.add_bias(X)\n n_tasks = len(X)\n n_feats = X[0].shape[1]\n W = np.random.randn(n_feats, n_tasks)\n start = time.time()\n cost_function = 0\n for t in range(n_tasks):\n #print('Training {} task with lasso regression'.format(t))\n lasso = LogisticRegression(C=self.lambda_1,\n penalty='l1',\n max_iter=max_iter)\n lasso.fit(X[t], y[t])\n W[:, t] = lasso.coef_\n stop = time.time() - start\n self.W = W\n return W, np.array([cost_function]), stop", "def logistic_regression(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.loss, grad_f = model_logistic.grad, debug = debug)\n return get_last_ans(ws, losses)", "def 
trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def log_reg(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n cls_log = LogisticRegression()\n cls_log.fit(x_train, y_train)\n y_predict = cls_log.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = cls_log.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def train(self, trainfile):\r\n sentences_emb,labels=self.read_data(trainfile)\r\n logReg = LogisticRegression(penalty=\"l2\",C = 10, multi_class='auto',solver='newton-cg')\r\n logReg.fit(sentences_emb,labels)\r\n self.clf=logReg", "def fit_logregL1(data, C=.01):\n from sklearn.linear_model import LogisticRegression\n n,m = data.shape\n \n # TODO: just build (sparse) L directly & construct with it\n # for each Xi, estimate the neighborhood of Xi using L1-reg logistic regression:\n nbrs,th_ij,th_i = [None]*n, [None]*n, np.zeros((n,))\n Xtr, Xtmp = toPM(data), toPM(data) # make two copies so we can modify\n for i in range(n): \n Xtmp[i,:] = 0. 
# remove ourselves\n lr = LogisticRegression(penalty='l1',C=C,solver='liblinear').fit(Xtmp.T,Xtr[i,:])\n nbrs[i] = np.where(np.abs(lr.coef_) > 1e-6)[1]\n th_ij[i]= lr.coef_[0,nbrs[i]]/2.\n th_i[i] = lr.intercept_/2.\n Xtmp[i,:] = Xtr[i,:]; # & restore after\n \n # Collect single-variable factors\n factors = [Factor(Var(i,2),[-t,t]).exp() for i,t in enumerate(th_i)]\n\n # Collect non-zero pairwise factors\n for i in range(n):\n for jj,j in enumerate(nbrs[i]):\n # TODO: FIX: double counts edges? use different combination methods?\n scope = [Var(i,2),Var(int(j),2)]\n t = th_ij[i][jj]\n factors.append( Factor(scope, [[t,-t],[-t,t]]).exp() )\n \n # Build a model from the factors\n return Ising(factors)", "def logistic_predict(weights, data):\n\n # TODO: Finish this function\n\n return y", "def fit(self, *, printing=False):\r\n _df_input_conditions(self._X, self._y)\r\n\r\n model = sm.Logit(self._y, sm.add_constant(self._X))\r\n\r\n if printing:\r\n print(\"Model fitting in progress...\")\r\n with _SuppressPrints(): # hide Statsmodels printing\r\n self._results = model.fit()\r\n self._results_output = self._results.summary(alpha=self._alpha)\r\n\r\n model_selection_dict = {\"log_likelihood\": self._results.llf,\r\n \"r_squared_pseudo\": self._results.prsquared,\r\n \"aic\": self._results.aic,\r\n \"bic\": self._results.bic}\r\n self._model_selection_stats = model_selection_dict\r\n self._log_likelihood = self._results.llf\r\n self._odds_ratios = pd.Series(np.exp(self._results.params\r\n .drop('const')),\r\n name='odds_ratios')\r\n\r\n self._standardize_results()\r\n\r\n self._is_fitted = True\r\n if printing:\r\n print(\"Model fitted.\")\r\n\r\n return self", "def fit(self, X, y, **fit_params):\n ...", "def logistic_regression_model_by_features(xTrain, yTrain, features, iter_step, resolution, initial_w0, step, max_iters):\r\n\r\n model = lgm.LogisticRegressionModel(initial_w0=initial_w0,\r\n initial_weights=[0.0] * len(features))\r\n\r\n # Extend xTrains and xTest with 1 at [0]\r\n xTrain = [[1] + x for x in xTrain]\r\n\r\n for i, iters in enumerate([iter_step] * resolution):\r\n fit_tic = time.time()\r\n model.fit(xTrain, yTrain, iterations=iters, step=step)\r\n fit_toc = time.time() - fit_tic\r\n iter_cnt = iter_step * (i + 1)\r\n print(\"Took {} sec. 
Fitted data for {} iterations\".format(fit_toc, iter_cnt))\r\n\r\n return model", "def fit(self, X, Y):\n ...", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def analysis(houses:pd.DataFrame) -> None:\n \n \"\"\"\n #Me just trying to fit the data without any outside influences\n f= f'SELLER_HOUSE ~ SQFT_PER + PRICE + C(LOCATION)' \n result= smf.logit(formula= str(f), data= houses).fit()\n print(result.summary2())\n y= ['SELLER_HOUSE']\n x= ['SQFT_PER', 'PRICE', 'LOC_699 - Not Defined', 'LOC_AA - Airport Area', 'LOC_CG - Columbus Grove',\n 'LOC_CV - Cypress Village', 'LOC_EASTW - Eastwood', 'LOC_EC - El Camino Real', 'LOC_GP - Great Park',\n 'LOC_IRSP - Irvine Spectrum', 'LOC_LGA - Laguna Altura', 'LOC_NK - Northpark', 'LOC_NW - Northwood', \n 'LOC_OC - Oak Creek', 'LOC_OH - Orchard Hills', 'LOC_OT - Orangetree', 'LOC_PS - Portola Springs', \n 'LOC_QH - Quail Hill', 'LOC_SH - Shady Canyon', 'LOC_SJ - Rancho San Joaquin', 'LOC_STG - Stonegate', \n 'LOC_Stonegate', 'LOC_TR - Turtle Rock', 'LOC_TRG - Turtle Ridge', 'LOC_UP - University Park',\n 'LOC_UT - University Town Center', 'LOC_WB - Woodbridge', 'LOC_WD - Woodbury', \n 'LOC_WI - West Irvine', 'LOC_WN - Walnut (Irvine)', 'LOC_WP - Westpark']\n x_train, x_test, y_train, y_test= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train, y_train.values.ravel())\n y_pred= logreg.predict(x_test)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test, y_test), 3))\n # This model is really bad\n \n \"\"\"\n \n \"\"\n houses= houses.drop(['DAYS_ON_MARKET', 'ADDRESS', 'LOCATION',\n 'STATUS', 'PROPERTY_TYPE', 'ZIP_CODE'], axis= 1)\n columns= houses.columns.values.tolist()\n y= ['SELLER_HOUSE']\n x= [i for i in columns if i not in y]\n \n # Over Sampling Using SMOTE \n x_train, _, y_train, _= train_test_split(houses[x], houses[y], test_size= 0.3, random_state= 500)\n x_columns= x_train.columns\n \n os= SMOTE(random_state= 0)\n os_x, os_y= os.fit_sample(x_train, y_train)\n os_x= pd.DataFrame(data= os_x, columns= x_columns)\n os_y= pd.DataFrame(data= os_y, columns= y)\n \n \n #Recursive Feature Elimination\n logreg= LogisticRegression(max_iter= 600)\n rfe= RFE(logreg, 20)\n rfe= rfe.fit(os_x, os_y.values.ravel())\n \n lst= [i for count, i in enumerate(x) if rfe.support_[count] == True]\n X= os_x[lst]\n Y= os_y['SELLER_HOUSE']\n \n \n #logit_model= sm.Logit(Y, X)\n #result= logit_model.fit()\n #print(result.summary2()) # Model choosen by RCE\n \n #These are features have a p-value less than 0.05\n final_x= ['BATHS', 'ZIP_92602.0', 'ZIP_92618.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n #final_x= ['ZIP_92602.0', 'LOC_699 - Not Defined', 'LOC_TR - Turtle Rock', 'LOC_WD - Woodbury']\n X2= os_x[final_x]\n \n logit_model2= sm.Logit(Y, X2)\n result2= logit_model2.fit()\n print(result2.summary2()) # Final Model\n \n x_train2, x_test2, y_train2, y_test2= train_test_split(X2, Y, test_size= 0.3, random_state= 500)\n logreg = LogisticRegression()\n logreg.fit(x_train2, 
y_train2)\n \n y_pred= logreg.predict(x_test2)\n print('Accuracy of logistic regression classifier on test set:', round(logreg.score(x_test2, y_test2), 2))\n \n conf_matrix= confusion_matrix(y_test2, y_pred)\n print(conf_matrix)\n # So 22+61 correct predictions and 13+44 wrong predictions\n \n logit_roc_auc = roc_auc_score(y_test2, logreg.predict(x_test2))\n fpr, tpr, _ = roc_curve(y_test2, logreg.predict_proba(x_test2)[:,1])\n plt.figure()\n plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()\n \"\"", "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n f = None\n df = None\n\n f = evaluate(targets, y)[0]\n\n N = len(data)\n M = len(weights) - 1 \n temp = np.ones([N, M + 1])\n temp[: N, : M] = np.array(data)\n\n\n df = np.zeros([M+1, 1])\n\n df[:, 0] = np.array([[np.mean([(y.flatten()[i] - targets.flatten()[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n # df = np.matrix([[np.mean([(y[i] - targets[i]) * temp[i][j] for i in range(0, N)]) for j in range(0, M + 1)],])\n\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()", "def stability_logistic(x, y, **kwargs):\n rlr = RandomizedLogisticRegression(n_jobs=kwargs.get('n_jobs', 4))\n if 'param' in kwargs:\n rlr.set_params(**kwargs['param'])\n rlr.fit(x, y)\n return rlr.get_support()", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def fit(self, x, y, logger):\n history = self.model1.fit(x=x, y=y, batch_size=self.batch_size, epochs=self.epochs)\n logger.log({'ValFuncLoss': history.history['loss'][-1]})", "def logisticcv_fit(self, nsplits: int, penalty: str = 'l2'):\r\n c_cand = [1e-4, 1e-3, 1e-2, 5e-2, 1e-1, 2e-1, 3e-1, 4e-1, 5e-1, 8e-1, 1, 2, 5, 10, 20, 50, 100]\r\n self.LogisticModel = LogisticRegressionCV(\r\n solver='liblinear', Cs=c_cand, cv=nsplits, penalty=penalty).fit(self.x, self.y)", "def fit(self, X, Y, **fit_params):\n ...", "def fit(self, X, Y, **fit_params):\n ...", "def fit_cpe(self, x, y):\n self.cpe_model = LogisticRegressionCV(solver='liblinear')\n self.cpe_model.fit(x, y)", "def fit(self, X):\n raise NotImplementedError", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression 
model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def fit(self, x, y, weights=None, **kwargs):\n from scipy.optimize import fmin_l_bfgs_b\n\n assert len(y) == x.shape[0]\n assert weights is None or len(weights) == x.shape[0]\n\n y0 = y == 0\n x0 = x[y0, :]\n x1 = x[~y0, :]\n\n if weights is None:\n loss_weights = None\n else:\n loss_weights = [weights[y0], weights[~y0]]\n\n def _loss_for_optimize(params):\n return LogisticRegression._loss_gradient(x0, x1, params[0], params[1:], self.lam, loss_weights)\n\n params0 = np.zeros(1 + x.shape[1])\n params_opt, loss_opt, info_opt = fmin_l_bfgs_b(_loss_for_optimize, params0, disp=0, **kwargs)\n print((\"%s funcalls: %s\" % (info_opt['task'], info_opt['funcalls'])))\n\n self.b = params_opt[0]\n self.w = params_opt[1:]", "def spark_LogisticRegression(*args, **kwargs): \n return LogisticRegression(*args, **kwargs)", "def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, verbose=False): \n reg_loss, reg_grad = add_l2_reg(compute_logistic_loss, \n compute_logistic_gradient,\n lambda_)\n \n return gradient_descent(y, tx, initial_w, max_iters, gamma, reg_loss, reg_grad)", "def predict_logit(self, x):\n with torch.no_grad():\n y_ = self.tr_model(x)\n return y_", "def fit(self, X, y=..., **fit_params):\n ...", "def fit(self, x):\n pass", "def fit(self, X, y):\n\n\t\trgen = np.random.RandomState(self.random_state)\n\t\tself.w_ = rgen.normal(loc=0.0, scale=0.01, size=1+X.shape[1])\n\t\tself.cost_ = []\n\n\t\tfor i in range(self.n_iter):\n\t\t\tnet_input = self.net_input(X)\n\t\t\toutput = self.activation(net_input)\n\t\t\terrors = y - output\n\t\t\tself.w_[1:] += self.eta * X.T.dot(errors)\n\t\t\tself.w_[0] += self.eta * errors.sum()\n\t\t\tcost = - y.dot(np.log(output)) - ((1-y).dot(np.log(1-output)))\n\t\t\tself.cost_.append(cost)\n\t\treturn self", "def logistic_regression(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tw = logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma)\n\t\n\tloss = calculate_nll(y, tx, w)\n\n\treturn w, loss", "def fit(self, X, y):\n self.x_values = X\n self.y_values = y\n self.gradient_descent(self.coefficients, X, y)", "def logit_cost(self, theta, X, y):\n\n cost = 0.0\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n \n for i in range(0, X.shape[0]):\n cost += (y[i]-1)*theta[i] + np.log(sig[i])\n ### END YOUR CODE\n cost = cost #+ 0.01 * self.regularizer[0](self.weights)\n return cost", "def fit_model():\n global _HOME_OWNERSHIP\n _HOME_OWNERSHIP = {x: i for i, x in enumerate([\"rent\", \"own\", \"mortgage\", \"other\"])}\n df = pd.read_csv(os.path.join(settings.BASE_DIR, \"LoanStats3a.csv\"), skiprows=1).head(5000)\n df = df[df.apply(is_poor_coverage, axis=1)]\n df['year_issued'] = df.issue_d.apply(lambda x: int(x.split(\"-\")[0]))\n df_term = df[df.year_issued < 2012]\n\n bad_indicators = [\n \"Late (16-30 days)\",\n \"Late (31-120 days)\",\n \"Default\",\n \"Charged Off\"\n ]\n df_term['is_rent'] = df_term.home_ownership == \"RENT\"\n df_term = df_term[df_term.home_ownership.apply(lambda x: x is not None and x != 'NONE')]\n df_term['is_bad'] = df_term.loan_status.apply(lambda x: x in bad_indicators)\n 
df_term['term'] = df_term.term.apply(lambda x: x.split()[0])\n df_term['home_ownership'] = df_term.home_ownership.apply(lambda x: _HOME_OWNERSHIP[x.lower()])\n global _LENDING_PREDICT_MODEL\n _LENDING_PREDICT_MODEL = LogisticRegression()\n _LENDING_PREDICT_MODEL.fit(df_term[_FEATURES], df_term.is_bad)", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def run_logistic_regression(file_path):\n\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n features_list = ['Direction', 'Speed']\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n logistic_model = LogisticRegression()\n\n # multi_model = MultiOutputRegressor(LogisticRegression())\n #\n # multi_model.fit(X_train, X_train)\n # multi_predict = multi_model.predict(X_train)\n\n logistic_model.fit(X_train, X_train)\n predict = logistic_model.predict(X_train)", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def logistic_reg(training_data):\r\n \r\n \"\"\" Setting guesses for minimum and maximum values of regularization parameter then\r\n find the value of parameter that minimizes error on cross validation data. If\r\n local minimum is found the return this model. 
If not, extend minimum or maximum \r\n appropriately and repeat \"\"\"\r\n from sklearn.linear_model import LogisticRegression\r\n C_min = 1.0e-5\r\n C_max = 1.0e5\r\n regularization_flag = 1 # To set 1 until local minimum is found\r\n regularization_param = 0\r\n \r\n# while regularization_flag != 0:\r\n# regularization_param, regularization_flag = set_reg_param(training_data, cv_data, alpha_min, alpha_max)\r\n# if regularization_flag == -1:\r\n# \"\"\" The local minimum is at point less than alpha_min \"\"\"\r\n# alpha_min = alpha_min * 0.3\r\n# if regularization_flag == 1:\r\n# \"\"\" The local minimum is at point greater then alpha_max \"\"\"\r\n# alpha_max = alpha_max * 3\r\n \r\n lr = LogisticRegression (C=C_max, random_state=0)\r\n lr.fit(training_data.X, training_data.y)\r\n return lr, C_max", "def logistic(weights, data, targets, hyperparameters):\n \n t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))\n f_e = data * t\n z_sums = np.sum(f_e, axis=1)\n y = sigmoid(z_sums +weights[-1])\n f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))\n df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)\n df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))\n df = np.reshape(df, ((len(df), 1)))\n\n return f, df, np.reshape(y, (len(y), 1))", "def fit_logistic(data,labels,learning_rate=0.1,max_iteration=1000,target_error=0.1):\n dimension = len(data[0])\n #weight vector - np.array([w1,w2,w3,w4])\n weights = np.random.uniform(low=-0.01,high=0.01,size=(dimension+1))\n iteration = 0\n \n while iteration < max_iteration:\n iteration = iteration + 1\n \n predicted_prob = np.apply_along_axis(predict, 1,data,weights)\n errors = predicted_prob - labels\n \n current_error = np.sum(computeCostV(labels,predicted_prob)) / len(data)\n print(\"Iteration {0}, error:{1}\".format(iteration,current_error))\n #stop the algorithm if target error rate is reached\n if(current_error < target_error):\n break\n \n for j in range(len(weights)):\n sum_term = np.sum([errors[i]*data[i][j-1] if j!=0 else errors[i] for i in range(len(data))])\n weights[j] = weights[j] - learning_rate * sum_term\n \n return weights", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE 
HERE ***", "def logistic_regression_vec(theta, trainX, trainY):\n # Add column of ones for bias\n trainX = np.hstack((np.ones((trainX.shape[0], 1)), trainX))\n h = sigmoid(np.inner(trainX, theta))\n # np.log(1-h) can lead to problems for h = 1.0\n h = np.where(h == 1.0, 1 - 1e-12, h)\n fval = -(trainY * np.log(h) + (1 - trainY) * np.log(1 - h)).sum()\n error = h - trainY\n # Negative gradient for a minimization, must be flattened for np.minimize\n grad = np.dot(trainX.T, error).flatten()\n return fval, grad", "def logistic_regression(y, tx, initial_w=None, max_iters=100, gamma=0.009, batch_size=1):\n # init parameters\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1])\n threshold = 1e-8\n losses = []\n y = (1 + y) / 2\n # build tx\n w = initial_w\n\n # start the logistic regression\n for i in range(max_iters):\n # get loss and update w.\n for y_batch, tx_batch in batch_iter(y, tx, batch_size=batch_size, num_batches=1):\n w, _ = learning_by_gradient_descent(y_batch, tx_batch, w, gamma)\n # converge criterion\n losses.append(calculate_loss(y,tx,w))\n if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:\n break\n #if i % int(max_iters/5) == 0:\n #print(losses[-1],i,'/{tot}'.format(tot=max_iters))\n\n return w,losses[-1]", "def logistic(self, data, weights, biases):\n\n state_weight_prods = np.dot(data, weights)\n print(-state_weight_prods - biases)\n activations = 1.0 / (1 + np.exp(-state_weight_prods - biases))\n plt.plot(state_weight_prods, activations)\n plt.show()\n return activations", "def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True", "def logistic(weights, data, targets, hyperparameters):\n # TODO: Finish this function\n n_data = len(data)\n dim_data = len(data[0])\n\n f = 0\n y = logistic_predict(weights, data)\n\n data = mod_data(data)\n\n # dl/dw_j = SUM(x_ij * (t_i - (1 - sigmoid(z))))\n df = np.dot(data.T, (1.0 * targets) - (1 - y))\n\n # to calculate f, we need to sum the negative log of all y iff target is 0 and (1-y) iff target is 1\n f = -1.0 * np.dot(targets.T, np.log(1 - y)) - 1.0 * np.dot(1 - targets.T, np.log(y))\n\n # calculate P(C=0|x_i) for all x_i \n return f[0,0], df, y", "def test_logistic_regression_c_parameter(params, X_train, X_test, y_train, y_test):", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def logistic_train_metrics(df):\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n model_reg = dill.load(open('maa_conflict_model.dill', 'rb'))\n\n return model_reg", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def logistic(self, X, w):\n g = 1 / (1 + np.exp(-X.dot(w)))\n return g", "def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,\n MAXIT=500):\n if x.shape[-1] != len(y):\n raise ValueError, \"x.shape[-1] and y should be the same length!\"\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, use simple logistic regression 
routine.\n return _simple_logistic_regression(x,y,beta_start=beta_start,\n CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)\n if beta_start is None:\n beta_start = NA.zeros(npreds+1,x.dtype.char)\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n Xt = NA.transpose(X)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n ebx = NA.exp(NA.dot(beta, X))\n p = ebx/(1.+ebx)\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood\n s = NA.dot(X, y-p) # scoring function\n J_bar = NA.dot(X*p,Xt) # information matrix\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n if iter == MAXIT and diff > CONV_THRESH: \n print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)\n return beta, J_bar, l", "def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)\n return get_last_ans(ws, losses)", "def fit(self, X):\n self._fit_X = X", "def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr", "def logistic(self,w,Xi):\n # print(w.T)\n # print(Xi)\n a = np.dot(w.T,Xi)\n return 1/(1+np.exp(-a))", "def logistic_regression(X, Y):\n m, n = X.shape\n theta = np.zeros(n)\n learning_rate = 10\n\n i = 0\n while True:\n i += 1\n prev_theta = theta\n grad = calc_grad(X, Y, theta)\n theta = theta - learning_rate * grad\n if i % 10000 == 0:\n print('Finished %d iterations' % i)\n # plot decision boundary for the ith iteration listed in i_lst\n i_lst = [1, 2, 3, 10, 100, 200, 500, 1000, 10000, 30370, 40000, 50000]\n if i in i_lst:\n save_path = \"output/p01_b_a\" + str(i) + \".png\"\n plot(X, Y, theta, save_path)\n if np.linalg.norm(prev_theta - theta) < 1e-15:\n print('Converged in %d iterations' % i)\n break\n return", "def feature_mapped_logistic_regression(power, l):\n df = pd.read_csv('ex2data2.txt', names=['test1', 'test2', 'accepted'])\n x1 = 
np.array(df.test1)\n x2 = np.array(df.test2)\n y = general.get_y(df)\n\n X = feature_mapping(x1, x2, power, as_ndarray=True)\n theta = np.zeros(X.shape[1])\n\n res = opt.minimize(fun=regularized_cost,\n x0=theta,\n args=(X, y, l),\n method='TNC',\n jac=regularized_gradient)\n final_theta = res.x\n\n return final_theta", "def fit(self):\n # Initialize parameter estimates\n if self.estimator is not None:\n param_estimates = self.estimator(self.xf, self.yf)\n else: param_estimates = None\n self.popt, self.pcov = curve_fit(self.model, self.xf, self.yf, \n p0=param_estimates)\n self.fit_history.append({\"popt\" : self.popt, \"pcov\" : self.pcov})", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y" ]
[ "0.7822972", "0.7692005", "0.7548392", "0.7395676", "0.7204495", "0.7157888", "0.71509266", "0.70614314", "0.70379573", "0.69379884", "0.69329995", "0.68996924", "0.6820621", "0.68176067", "0.6782893", "0.67820376", "0.6755673", "0.6706559", "0.67048633", "0.6691573", "0.6682348", "0.6670216", "0.66665214", "0.66611177", "0.6623446", "0.6600694", "0.6566568", "0.65568584", "0.6548293", "0.65135336", "0.65135336", "0.65135336", "0.65015095", "0.6482425", "0.64708275", "0.6469328", "0.6466403", "0.64604205", "0.64596707", "0.6458782", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64456755", "0.64435655", "0.64153", "0.64148533", "0.64101505", "0.6402521", "0.63974345", "0.6370589", "0.6360726", "0.6360726", "0.6345247", "0.63422745", "0.6339517", "0.63292944", "0.6326338", "0.630563", "0.63013995", "0.629358", "0.6266805", "0.6263078", "0.62582684", "0.6257755", "0.6256036", "0.6240988", "0.6234967", "0.6227426", "0.6221497", "0.6210096", "0.6201013", "0.61916506", "0.61907", "0.6184571", "0.61792773", "0.6177863", "0.6160087", "0.61600393", "0.61588705", "0.6156362", "0.6152127", "0.6142072", "0.6137512", "0.61338514", "0.6104876", "0.61038494", "0.60958236", "0.60905087", "0.6080483", "0.60789865", "0.60787654", "0.60487324", "0.60473764" ]
0.7538954
3
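The negative snippets gathered in the row above all fit or score a logistic-regression classifier in one way or another. Below is a minimal, self-contained sketch of that pattern using plain NumPy; every name, the learning-rate default, and the synthetic data are illustrative assumptions and are not drawn from any of the corpus documents listed in this dataset.

```python
import numpy as np


def sigmoid(z):
    # Logistic function applied element-wise.
    return 1.0 / (1.0 + np.exp(-z))


def fit_logistic(X, y, lr=0.1, n_iters=1000):
    # X: (n_samples, n_features); y: (n_samples,) with values in {0, 1}.
    # Plain batch gradient descent on the mean cross-entropy loss.
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])  # append a bias column
    w = np.zeros(Xb.shape[1])
    for _ in range(n_iters):
        p = sigmoid(Xb @ w)             # predicted probabilities
        grad = Xb.T @ (p - y) / len(y)  # gradient of the mean loss
        w -= lr * grad
    return w


def score_logistic(w, X, y):
    # Fraction of samples classified correctly at a 0.5 threshold.
    Xb = np.hstack([X, np.ones((X.shape[0], 1))])
    preds = (sigmoid(Xb @ w) >= 0.5).astype(int)
    return float(np.mean(preds == y))


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 2))
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    w = fit_logistic(X, y)
    print("train accuracy:", score_logistic(w, X, y))
```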
Return the score obtained on the test data set
def score(self, test_data): ins, outs = self._split_inputs_outputs(test_data) return self.model.score(ins, outs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, test_data):\n\n\t\tpass", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score", "def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass", "def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def getScore(data):\n return score", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)", "def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count", "def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)", "def test_scores(self) -> np.ndarray:\n return np.asarray(self.test_metric_dict[self.metric_name])", "def get_score(self, solution: np.array) -> float:\n pass", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = mean_absolute_error(self._y[test_index], y_pred)\n return mae", "def score(self, test_index):\n y_pred = self.predict(test_index)\n mae = 
mean_absolute_error(self._y[test_index], y_pred)\n return mae", "def score(self):", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_scores(self):\n return self.score", "def test_score():\n print(\"Tests for 'score' function\")\n test_suite = TestSuite()\n\n # Testing with empty hand\n result = score([])\n test_suite.run_test(result, 0, '0')\n # Testing with non-empty hand\n result = score([1, 3])\n test_suite.run_test(result, 3, '1')\n # Testing with non-empty hand\n result = score([1, 3, 1, 1])\n test_suite.run_test(result, 3, '2')\n # Testing with non-empty hand\n result = score([4, 3, 4, 3, 3])\n test_suite.run_test(result, 9, '3')\n\n # Show report\n test_suite.report_results()", "def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)", "def get_score(self, student_answers):\r\n pass", "def score(name):\r\n return (sorted(test).index(name)+1)*value(name)", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def scoring(self):\n pass", "def get(self):\n score = self._evaluate(self.y_true, self.y_pred)\n\n return score", "def score(self, y_true, y_pred):\r\n pass", "def evaluate(self, dataset):\n success = 0\n for sample, labelVector, label in dataset.tests:\n if self.guessLabel(sample) == label:\n success += 1\n return success / len(dataset.tests)", "def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def score(self):\n raise NotImplementedError()", "def get_score(self):\r\n return self.lcp.get_score()", "def get_scores(self):\n return SklearnModel.evaluate_no_ground_truth_classifier_metrics(self.X_test, self.predictions)", "def get_score(self):\n return self.__score", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def test_get_score(self):\r\n\r\n score_dict = self.get_score(True, 3, 3)\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(score_dict[\"score\"], 1.0)\r\n\r\n # Testing score after data is stored in student_data_for_location in xmodule.\r\n _score_dict = self.peer_grading.get_score()\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(_score_dict[\"score\"], 1.0)", "def score(self, data_test, labels_pred, is_train=False):\n return 
-np.log(np.clip(self.score_trust(data_test, labels_pred, is_train=is_train),\n sys.float_info.min, None))", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def score_dataset(X_train, X_valid, y_train, y_valid):\r\n model = RandomForestRegressor(n_estimators=100, random_state=0)\r\n model.fit(X_train, y_train)\r\n preds = model.predict(X_valid)\r\n score = mean_absolute_error(y_valid, preds)\r\n return score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def getScore(self):\r\n return self._score", "def score(self, predictions):\n return 0.", "def score(self, X, y, predict_results=None, style=\"accuracy\"):\n results = predict_results\n if results is None:\n results = np.reshape(self.predict(X)[0], np.shape(y))\n if style=='accuracy':\n correct = 0\n for scored, expected in zip(results, y):\n if scored == expected:\n correct += 1\n return 0 if len(results) == 0 else (correct / len(results)) * 100.0\n if style=='mse':\n summer = 0\n count = 0\n for scored, expected in zip(results, y):\n summer = summer + ((scored - expected) ** 2)\n count = count + 1\n return summer / count", "def get_score_from_test(gender, grade, test_name, test_result):\n score_map = TestSports[test_name.upper()] \\\n .value \\\n [Constants.SCORE_MAP] \\\n [Gender[gender.upper()]] \\\n [Grade[grade.upper()]]\n print(score_map)\n\n score = Student.get_score(score_map, test_result)\n print(score)", "def score(self, X, y):\n\n correct = sum(self.predict(X) == y)\n return float(correct) / len(y)", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def get_score(self):\n return self.score", "def score(self, X, y):\n out = None\n ### YOUR CODE HERE\n pred = self.predict(X)\n assert pred.shape == y.shape\n out = ((pred-y)**2).mean()\n ### END CODE\n return out", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def calc_score(model, scorer, X, y_true):\n\n y_preds = model.predict(X)\n score = scorer(y_true, y_preds)\n\n return score", "def getScore(self):\n return self._score", "def get_score(self):\r\n return None", "def score(self):\n return None", "def evaluate(y_test, y_hat):\n score = np.sum(y_test==y_hat)/len(y_test)\n return score", "def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score", "def get_polyscore(self,X_test=None,y_test=None,metric='adjusted_r2'):\n X = self.get_points()\n y_pred = self.get_polyfit(X)\n train_score = score(self._model_evaluations, y_pred,metric, X=X)\n if X_test is not None and y_test is not None:\n y_pred_test = self.get_polyfit(X_test)\n test_score = score(y_test,y_pred_test,metric,X=X_test)\n return train_score, test_score\n else:\n return train_score", "def get_score(self, a, b):\n ### FILL IN ###", "def score(self, x, y=None):\n _, logp = self.score_samples(x)\n return logp", "def 
score(self, X, y):\n X_pp = self.preprocessor.transform(X)\n # Score the model on the data here\n return(self.estimator.score(X_pp, y))", "def scoring(estimator, features_test, labels_test):\n pred = estimator.predict(features_test)\n p = metrics.precision_score(labels_test, pred, average='micro')\n r = metrics.recall_score(labels_test, pred, average='micro')\n if p > 0.3 and r > 0.3:\n return metrics.f1_score(labels_test, pred, average='macro')\n return 0", "def get_score(self):\n\n return self._score", "def test(self, idx_test):\n self.eval()\n output = self.predict()\n # output = self.output\n loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])\n acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def get_score(self) -> int:\n return self.rstate.score()", "def get_score(self):\n return float(self._score)", "def score(self, archi:ArchitectureNN):\n archi.fit_model(self.train_data, **self.train_params)\n \n return archi.compute_test_score(self.test_data)", "def score(self) -> int:\n return self._score", "def _score(self, ModifiedWeights):\r\n \r\n UnflattenedWeights = self._UnflattenWeights(WeightsStrucure = self.WeightsStrucure, ModifiedWeights = ModifiedWeights)\r\n self.KerasModels.set_weights(UnflattenedWeights)\r\n test_on_batch = self.KerasModels.test_on_batch(X_train, y_train, sample_weight=None) # return ['loss', 'acc']\r\n return test_on_batch[1]", "def score(self):\n return 1 if self.succeeded() else 0", "def score(self,ytest,how='score'):\n scores = []\n #iterate through each pred for each nn value\n for pred in self.ypred:\n sc = np.empty(pred.shape[1]) #need to store the scores\n\n for i in range(pred.shape[1]):\n\n p = pred[:,i]\n\n if how == 'score':\n sc[i] = utilities.score(p, ytest[:,i])\n\n if how == 'corrcoef':\n\n sc[i] = utilities.corrcoef(p, ytest[:,i])\n\n scores.append(sc)\n\n scores = np.vstack(scores)\n return scores", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n self.run(test_data)\n if check_format(self.get_result_path()):\n thresholds, precisions, avg_precision, reciprocal_rank, num_relevant = evaluate(test_data_path,\n self.get_result_path())\n return avg_precision", "def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def get_score(self):\n for response in self.response_list:\n self.score += response.get_score", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def score(self, X, y):\n ...", "def fit_score(estimator, train_data, test_data):\n estimator.fit(*train_data)\n return estimator.score(*test_data)", "def evaluate(self,test_data):\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)", "def score_samples(self, X):\n ...", "def getScore(self):\n return sum(self.field)", "def get_score(self):\n return tuple(self.score)", "def score(self, X, y):\n return np.mean(y == self.predict(X))", "def score(self, params):\n\n if self.use_sqrt:\n return 
self.score_sqrt(params)\n else:\n return self.score_full(params)", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def scores_(self):\n return self.predictor.scores_", "def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs", "def match_score(self):\n return self._match_score", "def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]", "def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result", "def test_score(self):\n reg = ElasticRegistration()\n reg.fit(self.unimodal_samples)\n score = reg.score(self.unimodal_samples)\n np.testing.assert_almost_equal(score, 0.9994225)", "def evaluate(self,test_data):\r\n\t\ttest_results= [(np.argmax(self.forwardprop(x)),y) for (x,y) in test_data]\r\n\t\treturn sum(int(x==y) for (x,y) in test_results)", "def get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def evaluate(self, test_data):\n test_results = [(np.argmax(self.feed_forward(x)), y) for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)" ]
[ "0.86668295", "0.7818388", "0.77717274", "0.77294904", "0.76660264", "0.76660264", "0.76569784", "0.73830205", "0.7382572", "0.7316532", "0.7287335", "0.72611284", "0.7224763", "0.7224763", "0.7218049", "0.7218049", "0.7199401", "0.7164144", "0.71476907", "0.71476907", "0.71476907", "0.7122294", "0.7112555", "0.7112099", "0.7096238", "0.70957667", "0.70882064", "0.7086338", "0.7083518", "0.706795", "0.704988", "0.7017643", "0.7013226", "0.6998448", "0.6996952", "0.6986899", "0.6967073", "0.6962792", "0.6959996", "0.69547063", "0.695145", "0.69498116", "0.6937778", "0.6937778", "0.6937778", "0.69372916", "0.69315106", "0.69281155", "0.69238275", "0.6904059", "0.68879604", "0.68844277", "0.68759", "0.6868415", "0.68496716", "0.6834346", "0.68315464", "0.6812223", "0.6805948", "0.68020093", "0.68018377", "0.6797905", "0.6793629", "0.6791574", "0.6788242", "0.6785646", "0.6783198", "0.67821884", "0.6772115", "0.6771324", "0.674832", "0.6741034", "0.672738", "0.67259973", "0.67207146", "0.6704151", "0.66936487", "0.66869193", "0.66635615", "0.66609776", "0.6656538", "0.6640379", "0.66385376", "0.6620495", "0.66195035", "0.66139966", "0.6606762", "0.6605174", "0.6604633", "0.6603755", "0.65994304", "0.65951174", "0.6593147", "0.65927213", "0.65890425", "0.65746623", "0.65733814", "0.6570737", "0.65682167", "0.656151" ]
0.8328699
1
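The document stored in the preceding row is a score() wrapper that splits a test set into inputs and labels and delegates to the underlying model's own score(). The sketch below shows one way such a wrapper could sit inside a complete class, assuming a scikit-learn LogisticRegression as the backing model and an illustrative _split_inputs_outputs() helper (its implementation is not part of the row above).

```python
import numpy as np
from sklearn.linear_model import LogisticRegression


class SimpleClassifier:
    def __init__(self):
        self.model = LogisticRegression(max_iter=1000)

    @staticmethod
    def _split_inputs_outputs(data):
        # Assumes each record is a (feature_vector, label) pair.
        ins = np.array([features for features, _ in data])
        outs = np.array([label for _, label in data])
        return ins, outs

    def fit(self, train_data):
        ins, outs = self._split_inputs_outputs(train_data)
        self.model.fit(ins, outs)
        return self

    def score(self, test_data):
        # Same shape as the corpus document: mean accuracy on held-out data.
        ins, outs = self._split_inputs_outputs(test_data)
        return self.model.score(ins, outs)


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    records = [(rng.normal(size=3), i % 2) for i in range(100)]
    clf = SimpleClassifier().fit(records[:80])
    print("test accuracy:", clf.score(records[80:]))
```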
Train the neural network using Adam optimizer
def train(self): input_size = len(self.inputs[0]) output_size = len(set(self.labels)) hidden_size_1 = 15 hidden_size_2 = 15 # One hot encode the labels encoder = LabelEncoder() encoder.fit(self.labels) enc_labels = encoder.transform(self.labels) enc_labels = np_utils.to_categorical(enc_labels) # Create the MLP model = Sequential() model.add(Dense(hidden_size_1, activation='relu', input_dim=input_size)) model.add(Dense(hidden_size_2, activation='relu')) model.add(Dense(output_size, activation='softmax')) # Compile model with optimizer and loss function model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # Train the model model.fit(self.inputs, enc_labels, steps_per_epoch=1000, epochs=20, verbose=2) self.model = model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def train():\n rng = random.PRNGKey(0)\n\n # Get Zachary's karate club graph dataset.\n node_feats, node_labels, sources, targets = get_karate_club_data()\n\n # Create model and optimizer.\n _, initial_params = GNN.init(\n rng, node_x=node_feats, edge_x=None, sources=sources, targets=targets)\n model = nn.Model(GNN, initial_params)\n optimizer = optim.Adam(learning_rate=0.01).create(model)\n\n # Train for 20 iterations.\n for iteration in range(20):\n optimizer, loss = train_step(optimizer, node_feats, sources, targets)\n\n accuracy = eval_step( # Model is stored in `optimizer.target`.\n optimizer.target, node_feats, sources, targets, node_labels)\n\n print('iteration: %d, loss: %.4f, accuracy: %.2f'\n % (iteration+1, loss, accuracy * 100))", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def train(self, X, y, lr, epoch, method='adam', quit=1e-4):\n if len(y.shape) == 1:\n y = y.reshape((-1, 1))\n if not (0 < lr < 1):\n raise self.ANNException('learning rate cannot be negative or exceeds 1')\n if epoch <= 0:\n raise self.ANNException('epoch must be postitive integer')\n if method == 'gd':\n for _ in range(epoch):\n nodes = self._forward(X)\n self._backpropagate(y, nodes, lr)\n elif method == 'adam':\n alpha = 0.1\n beta1 = 0.5\n beta2 = 0.999\n epsilon = 1e-8\n mt = np.zeros(shape=self.weight.shape)\n vt = np.zeros(shape=self.weight.shape)\n before_err = self._energy(X, y)\n for t in range(1, epoch+1):\n nodes = self._forward(X)\n gt = self._backpropagate(y, nodes, alpha, ret=True)\n mt = beta1*mt + (1-beta1)*gt\n vt = beta2*vt + (1-beta2)*gt**2\n mthat = mt / (1-np.power(beta1, t))\n vthat = vt / (1-np.power(beta2, t))\n self.weight -= alpha * mthat/(np.sqrt(vthat)+epsilon)\n after_err = self._energy(X, y)\n if 0 < after_err-before_err < quit:\n return\n else:\n before_err = after_err\n else:\n raise self.ANNException('only gd and adam optimizer are supported')", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = 
training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n 
core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def train_network(self, optimizer, loss_func, num_iterations=1):\n losses = []\n start = time.time()\n for i in range(num_iterations):\n # Now run an iteration\n losses.append(self.net.single_iteration(self.Xtrain, self.Ttrain, optimizer, loss_func))\n print('Training took {} seconds'.format(time.time() - start))\n return losses", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n session.run(optimizer, feed_dict={y: label_batch, x: feature_batch, keep_prob: keep_probability})", "def train(self, iterations = 100):\n arguments = ()\n print(\"training...\")\n results = optimize.minimize(self.CostFunction,x0 = self.Thetas, args = arguments, options = {'disp':True, 'maxiter': iterations}, method = \"L-BFGS-B\", jac = True)\n self.Thetas = results['x']\n FinalCost, _ = self.CostFunction(self.Thetas)\n print(\"successfully trained the model\") \n print(\"Final Cost for this model is:\", FinalCost)", "def train_neural_network(self, session, x, y, keep_prob, optimizer, keep_probability, feature_batch, label_batch):\n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n pass", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch, x, y, keep_prob):\n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n pass", "def train (self, data, targets, epochs = 100, etaDataLayer = 0.2, etaHiddenLayer = 0.2, hiddenLayers = 1, hiddenNeurons = 200, dataWeightsLimit = 0.05, hiddenWeightsLimit = 0.5, regression = True, backPropagate = False, verbose = False):\n\t\tself.data[\"input\"] \t\t= numpy.array(data)\n\t\tself.data[\"rows\"] \t\t= len(data)\n\t\tself.data[\"cols\"] \t\t= len(data[0])\n\t\tself.targets \t\t\t= targets\n\t\tself.classCount \t\t= len(set(targets))\n\t\tself.epochs \t\t\t= 
epochs\n\t\tself.etaDataLayer \t\t= float(etaDataLayer)\n\t\tself.etaHiddenLayer \t= float(etaHiddenLayer)\n\t\tself.hiddenLayers \t\t= hiddenLayers\n\t\tself.hiddenNeurons \t\t= hiddenNeurons\n\t\tself.dataWeightsLimit \t= float(dataWeightsLimit)\n\t\tself.hiddenWeightsLimit\t= float(hiddenWeightsLimit)\n\t\tself.regression \t\t= regression\n\t\tself.backPropagate \t\t= backPropagate\n\t\tself.verbose \t\t\t= verbose\n\t\tself.report \t\t\t= {}\n\t\tself.report[\"total\"] \t= self.epochs * len(data)\n\t\tself.dataWeights \t\t= self.dataWeightsLimit * numpy.random.random_sample((self.data[\"cols\"], self.hiddenNeurons))\n\t\tself.hiddenWeights \t\t= self.hiddenWeightsLimit * numpy.random.random_sample((self.hiddenNeurons + 1, 1))\n\t\tprint \"========================================\"\n\t\tprint \"--- Training . . . ---------------------\"\n\t\tprint \"========================================\"\n\t\tfor epoch in range(self.epochs):\n\t\t\thits \t\t\t\t= 0\n\t\t\tmisses \t\t\t\t= 0\n\t\t\tprocessed \t\t\t= 0\n\t\t\tdistances \t\t\t= []\n\t\t\tpredictedClass \t\t= None\n\t\t\tsampleIndices \t\t= sorted(range(len(self.data[\"input\"])), key = lambda k: random.random())\n\t\t\tif self.verbose == True:\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tprint \"--- Epoch\", epoch\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\tfor sampleIndex in sampleIndices:\n\t\t\t\tprint self.dataWeights\n\t\t\t\tsample \t\t\t= self.data[\"input\"][sampleIndex]\n\t\t\t\ttarget \t\t\t= self.targets[sampleIndex]\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Feeding Forward . . . --------------\"\n\t\t\t\t\tprint \" Sample\", sampleIndex + 1, \"of Epoch\", epoch + 1\n\t\t\t\t# Forward Propagation\n\t\t\t\ta \t\t\t\t= 1.0 / (1.0 + numpy.exp(- numpy.dot(sample, self.dataWeights)))\n\t\t\t\tb \t\t\t\t= numpy.concatenate([[1], a])\n\t\t\t\toutput \t\t\t= 1.0 / (1.0 + numpy.exp(- numpy.dot(b, self.hiddenWeights)))[0]\n\t\t\t\t# Metric Computation & Communication\n\t\t\t\tif self.regression == False:\n\t\t\t\t\terror \t\t\t= 0.5 * (((target / (self.classCount - 1)) - output) ** 2)\n\t\t\t\t\tdistance \t\t= abs(target - (output * (self.classCount - 1)))\n\t\t\t\t\tpredictedClass \t= round(self.classCount * output) - 1\n\t\t\t\t\tif predictedClass == target:\n\t\t\t\t\t\thits += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmisses += 1\n\t\t\t\t\tif self.verbose == True:\n\t\t\t\t\t\tprint \" Annotated Class: \\t\", target\n\t\t\t\t\t\tprint \" Computed Class: \\t\", predictedClass\n\t\t\t\t\t\tprint \" Computed SSE: \\t\\t%0.4f\" % error\n\t\t\t\t\t\tprint \" Raw Distance: \\t\\t%0.4f\" % distance\n\t\t\t\t\t\tif predictedClass == target:\n\t\t\t\t\t\t\tprint \" Prediction Status: \\tHit! :)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \" Prediction Status: \\tOops! :(\"\n\n\t\t\t\telse:\n\t\t\t\t\terror \t\t\t= 0.5 * ((target - output) ** 2)\n\t\t\t\t\tdistance \t\t= abs(target - output)\n\t\t\t\t\tif self.verbose == True:\n\t\t\t\t\t\tprint \" Annotated Value: \\t\", target\n\t\t\t\t\t\tprint \" Computed Value: \\t%0.4f\" % output\n\t\t\t\t\t\tprint \" Computed SSE: \\t\\t%0.4f\" % error\n\t\t\t\t\t\tprint \" Raw Distance: \\t\\t%0.4f\" % distance\n\t\t\t\tprocessed += 1\n\t\t\t\tdistances.append(distance)\n\t\t\t\t# Back Propagation\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Back Propagating . . . 
-------------\"\n\t\t\t\tdeltaDataWeights \t= ((target / self.classCount - 1) - output) * output * (1 - output)\n\t\t\t\tdeltaHiddenWeights \t= numpy.delete((b * (1 - b) * self.hiddenWeights * deltaDataWeights)[0], 0.0)\n\t\t\t\tupdateHiddenWeights = etaHiddenLayer * b * deltaDataWeights\n\t\t\t\tupdatedHiddenLayer \t= b + updateHiddenWeights\n\t\t\t\tself.hiddenWeights \t= numpy.transpose(numpy.atleast_2d(updatedHiddenLayer))\n\t\t\t\tupdateDataWeights \t= etaDataLayer * numpy.outer(sample, deltaHiddenWeights)\n\t\t\t\tself.dataWeights \t= self.dataWeights + updateDataWeights\n\t\t\t\tprint self.dataWeights\n\t\t\t\tif self.verbose == True:\n\t\t\t\t\tprint \"--- Sample Processed -------------------\\n\"\n\n\t\t\t\t# updateDataWeights \t= etaDataLayer * numpy.array(sample) * deltaHiddenWeights\n\n\t\t\t\t# # Multiple Hidden Layer Routine\n\t\t\t\t# for hiddenLayer in range(hiddenLayers):\n\t\t\tif self.verbose == True:\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tprint \"--- Epoch\", epoch, \"Complete\"\n\t\t\t\tprint \"----------------------------------------\"\n\t\t\t\tif self.regression == False:\n\t\t\t\t\taccuracy \t\t= (hits/processed) * 100\n\t\t\t\t\tprint \" \tEpoch Hits / Total:\\t\", hits, \"/\", processed\n\t\t\t\t\tprint \" \tEpoch Hit Percent:\\t%0.2f\" % (float(hits) / processed * 100), \"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n\"\n\t\tprint \"========================================\"\n\t\tprint \"--- Training Complete ------------------\"\n\t\tprint \"========================================\"", "def train(self, num_epochs: int):\n learn_rate = 0.02\n\n images, labels = self._mn_data.load_training()\n indices = [i for i in range(len(images))]\n\n for epoch in range(num_epochs):\n random.shuffle(indices) # Avoids modifying the actual lists\n epoch_cost = 0\n i = 0\n\n # Go through the training data in batches\n while i < len(indices):\n print(i, \"---------------------------------------------------------\")\n\n if i >= 800:\n break\n\n start = i\n end = i + batch_size\n batch_indices = indices[start:end]\n\n dw = [[[0 for _ in range(perceptron.size_w())] for perceptron in layer] for layer in self._network]\n db = [[0 for _ in layer] for layer in self._network]\n\n # Take a single image from the batch\n for index in batch_indices:\n # print(\"ex\")\n result = self.feed_forward(images[index])\n epoch_cost += self.cost(result, labels[index]) # Creates self._desired_changes\n\n # Backpropagate starting from the last (output) layer\n for j in range(len(self._network)-1, -1, -1):\n layer = self._network[j]\n prev_act_values = self._layer_inputs[j]\n function_name = layer[0].get_activation().name()\n\n if j > 0:\n next_desired_changes = [0.0 for _ in self._network[j-1]]\n else:\n next_desired_changes = None\n\n if function_name == \"relu\":\n leakage = self._relu.get_leakage()\n\n # Look at each perceptron\n for k in range(len(layer)):\n perceptron = layer[k]\n dc_da = self._desired_changes[k]\n\n if function_name == \"sigmoid\":\n dc_da *= self._sigmoid(perceptron.z) * (1 - self._sigmoid(perceptron.z))\n # print(perceptron.z, sig_delta)\n # print(dc_da)\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n elif function_name == \"relu\":\n dc_da *= leakage if perceptron.z < 0 else 1\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in 
range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n # print(\"dcda\", dc_da)\n\n if next_desired_changes:\n # print(\"nd\", next_desired_changes)\n self._desired_changes = next_desired_changes\n\n # End of sample image loop\n # print(dw[1:])\n # break\n\n # Update weights and biases\n for j in range(len(self._network)):\n layer = self._network[j]\n\n for k in range(len(layer)):\n perceptron = layer[k]\n\n perceptron.change_weights_and_bias(dw[j][k], db[j][k])\n\n # print(dw[1:])\n # print(db)\n\n i += batch_size\n\n print(\"Epoch {} completed out of {} with loss {}\".format(epoch + 1, num_epochs, epoch_cost))", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n # TODO: Implement Function\n session.run(optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})", "def train(epochs, batch_size, lr, verbose):\n # autograd globally off\n torch.set_grad_enabled(False)\n # generate training and testing datasets\n train_data, train_label = generate_data()\n test_data, test_label = generate_data()\n # normalize data be centered at 0\n train_data, test_data = normalize(train_data, test_data)\n\n if verbose:\n print(\"--- Dataset ---\")\n print(\"Train X: \", train_data.size(), \" | Train y: \", train_label.size())\n print(\" Test X: \", test_data.size(), \" | Test y: \", test_label.size())\n\n layers =[]\n # input layer (2 input units)\n linear1 = Linear(2, 25, bias= True, weight_init=xavier_uniform)\n\n # 3 hidden layers (each 25 units)\n linear2 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear3 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear4 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n\n # output layer (2 output units)\n linear5 = Linear(25, 2, bias= True, weight_init=xavier_uniform)\n\n\n layers.append(linear1)\n layers.append(Relu())\n layers.append(linear2)\n layers.append(Relu())\n layers.append(linear3)\n layers.append(Relu())\n layers.append(linear4)\n layers.append(Tanh())\n layers.append(linear5)\n\n model = Sequential(layers)\n if verbose: print(\"Number of model parameters: {}\".format(sum([len(p) for p in model.param()])))\n\n criterion = MSE()\n optimizer = SGD(model, lr=lr)\n\n train_losses, test_losses = [], []\n train_accuracies, test_accuracies = [], []\n train_errors, test_errors = [], []\n\n if verbose: print(\"--- Training ---\")\n for epoch in range(1, epochs+1):\n if verbose:print(\"Epoch: {}\".format(epoch))\n\n # TRAINING\n for batch_idx in range(0, train_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(train_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, train_label.narrow(0, batch_idx, batch_size))\n train_losses.append(loss)\n if verbose: print(\"Train Loss: {:.2f}\".format(loss.item()))\n\n # put to zero weights and bias\n optimizer.zero_grad()\n\n ## Backpropagation\n # Calculate grad of loss\n loss_grad = criterion.backward()\n\n # Grad of the model\n model.backward(loss_grad)\n\n # Update parameters\n optimizer.step()\n\n train_prediction = model.forward(train_data)\n acc = accuracy(train_prediction, train_label)\n train_accuracies.append(acc)\n train_errors.append(1-acc)\n if verbose: print(\"Train Accuracy: {:.2f}\".format(acc.item()))\n\n # EVALUATION\n for batch_idx in range(0, test_data.size(0), batch_size):\n # 
axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(test_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, test_label.narrow(0, batch_idx, batch_size))\n test_losses.append(loss)\n if verbose: print(\"Test Loss: {:.2f}\".format(loss.item()))\n\n test_prediction = model.forward(test_data)\n acc = accuracy(test_prediction, test_label)\n test_accuracies.append(acc) \n test_errors.append(1-acc)\n if verbose: print(\"Test Accuracy: {:.2f}\".format(acc.item()))\n\n return train_losses, test_losses, train_accuracies, test_accuracies, train_errors, test_errors", "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n # TODO: Implement Function \n \n session.run(optimizer,feed_dict={x:feature_batch,y:label_batch, keep_prob:keep_probability})\n \n pass", "def train(self, x_train: np.ndarray, y_train: np.ndarray, num_epochs=10, verbose=True):\n self._prepropagation_check(x_train, y_train)\n\n def printv(t): not verbose or print(t)\n\n # If the dataset only consists of one example, it is represented as a vector\n # If it is the case, we change it to be a matrix so that the processing is the same\n if len(x_train.shape) == 1:\n x_train = x_train[:, np.newaxis]\n y_train = y_train[:, np.newaxis]\n\n n_sample = x_train.shape[1]\n\n printv(f\"Training the network for the {self._times_trained+1} time\")\n\n for n_epoch in range(1, num_epochs + 1):\n printv(f\"| Epoch {n_epoch} / {num_epochs}\")\n\n accuracy, cost = 0., 0.\n\n for n_b, batch_indices in enumerate(self._batcher(self.batch_size, n_sample)):\n x_batch = x_train[:, batch_indices]\n y_batch = y_train[:, batch_indices]\n\n y_hat = self._forward_propagation(x_batch)\n\n y_pred = one_hot(y_hat.argmax(axis=0))\n accuracy = np.mean(1 * (y_pred == y_batch))\n\n cost = self._output_layer.cost(y_hat, y_batch)\n\n assert y_hat.shape[0] == self._output_layer.size\n assert y_batch.shape[0] == self._output_layer.size\n\n self._back_propagation(y_batch)\n self._optimize()\n\n self.logger.log_cost_accuracy(n_epoch, cost, accuracy)\n\n self._times_trained += 1\n\n return self", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n 
deep_neural_network_model.save('deep_neural_network_model')", "def _add_train_op(self):\n with tf.device(\"/gpu:0\"):\n learning_rate_D = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n learning_rate_G = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_G,\n # 100000, 0.96, staircase=True)\n learning_rate_D_in = 0.0004 # tf.train.exponential_decay(0.001, self.global_step_D,\n # 100000, 0.96, staircase=True)\n self._train_op_D = tf.train.AdamOptimizer(learning_rate_D,beta1=0.5).minimize(self._D_loss,\n global_step=self.global_step_D,\n var_list=self.discriminator._theta)\n self._train_op_D_in = tf.train.AdamOptimizer(learning_rate_D_in,beta1=0.5).minimize(self._D_in_loss,\n global_step=self.global_step_D_in,\n var_list=self.discriminator_inner._theta)\n\n self._train_op_G = tf.train.AdamOptimizer(learning_rate_G,beta1=0.5).minimize(self._G_loss,\n global_step=self.global_step_G,\n var_list=self.generator._theta)", "def train(self, verbose=True):\n\n\n learned = False\n iteration = 0\n\n from util.loss_functions import DifferentError\n loss = DifferentError()\n\n\n\n\n\n # Train for some epochs if the error is not 0\n while not learned:\n # x ist ein Bild bestehend aus einem Label (erster Eintrag) und 784 Pixeln\n # t ist das Zielergebnis von x (überprüfbar mit dem Label)\n # o ist der tatsächliche Ergebnis von x\n # w ist der Gewichtsvektor\n # Als Aktivierungsfunktion verwenden wir die Sigmoid Funktion\n # Das Training wird dann beendet, sobald das Fehlerkriterium konvergiert\n\n totalError = 0\n\n output = []\n labels = self.trainingSet.label\n inputs = self.trainingSet.input\n\n # iteriere für jede Instanz im Trainingsset x € X\n for input in inputs:\n # Ermittle O_x = sig(w*x)\n output.append(self.fire(input))\n\n # Ermittle Fehler AE = tx - ox\n error = loss.calculateError(np.array(labels), np.array(output))\n\n # grad = [0]\n grad = np.zeros(len(self.trainingSet.input[0]))\n grad2 = np.zeros(len(self.trainingSet.input[0]))\n\n for e, input, out in zip(error, inputs, output):\n activationPrime = Activation.getDerivative(activationName)(np.dot(np.array(input), self.weight))\n #grad += np.multiply( np.multiply( input, e), activationPrime)\n grad += np.multiply( input, e)\n\n # Update grad = grad + errorPrime * x * activationPrime\n\n\n\n # print grad - grad2\n #print \"Error: \" + str(error) + \" Grad: \" + str(grad)\n\n # update w: w <- w + n*grad\n self.updateWeights(grad)\n\n\n iteration += 1\n totalError = error.sum()\n\n if verbose:\n logging.info(\"Epoch: %i; Error: %i\", iteration, totalError)\n\n if abs(totalError) < 0.01 or iteration >= self.epochs:\n # stop criteria is reached\n learned = True\n\n pass", "def train(self):\n #learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,self.decay_rate, staircase=True) #去掉decay_steps\n train_op = tf.contrib.layers.optimize_loss(self.losses, global_step=self.global_step, learning_rate=self.learning_rate, optimizer=\"Adam\")\n return train_op", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def train(self, X, y, batch_size=5, num_epochs=10, alpha=0.1, gamma=0.9, learning=\"Delta\"):\n rem = 
int(np.ceil(len(X[0])/batch_size))\n for epoch in range(num_epochs):\n art = 0;\n for sample in range(rem):\n end = art + batch_size\n\n # Get a sample (column from X and Y) where the size of the sample is given by the batch size\n sampleX = X[:, art : end]\n sampleY = y[:, art : end]\n #print (sampleX)\n\n # Get the prediction\n results = self.predict(sampleX)\n art += batch_size\n\n if learning == \"Delta\" or learning == \"delta\":\n # Calculate e\n e = np.subtract(sampleY, results)\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(e, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)\n \n elif learning == \"Filtered\" or learning == \"filtered\":\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(sampleY, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Multiply the old weights by some scalar gamma\n gw = np.multiply(1 - gamma, self.weights)\n\n self.weights = np.add(gw, aep)\n\n elif learning == \"Unsupervised_hebb\" or learning == \"unsupervised_hebb\":\n # Add a row of one's to the top of the input matrix\n #newX = np.vstack((np.array([1 for column in range(sampleX.shape[1])]), sampleX))\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(results, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(self, examples):\n optimizer = optim.Adam(self.nnet.parameters(),\n lr=1e-7,\n weight_decay=1e-7\n )\n average_loss = 0\n total_batch_num = 0\n for epoch in range(self.epoch_num):\n epoch_loss = 0\n batch_idx = 0\n while batch_idx < int(len(examples)/args.batch_size):\n ids = np.random.randint(len(examples), size=args.batch_size)\n state, policy, v = list(zip(*[examples[i] for i in ids]))\n\n state = torch.Tensor(np.array(state)).contiguous().cuda()\n 
target_policy = torch.Tensor(\n np.array(policy)).contiguous().cuda()\n target_v = torch.Tensor(np.array(v)).contiguous().cuda()\n\n # predict\n self.nnet.eval()\n out_policy, out_v = self.nnet(state)\n self.nnet.train()\n\n total_loss = self.loss(\n target_policy, out_policy, target_v, out_v)\n '''\n print(\"state:\\n {}\".format(state[3]))\n print(\"policy:\\n {}\".format(target_policy[3]))\n print(\"nn_policy:\\n {}\".format(out_policy[3]))\n '''\n\n average_loss += abs(np.sum(total_loss.cpu().data.numpy()))\n epoch_loss += abs(np.sum(total_loss.cpu().data.numpy()))\n # print(\"loss in batch {} is {}\".format(batch_idx, total_loss.cpu().data.numpy()))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n total_loss.sum().backward()\n optimizer.step()\n\n batch_idx += 1\n total_batch_num += 1\n print('epoch: {}, loss: {}'.format(epoch, epoch_loss/batch_idx))\n self.nnet.eval()\n return average_loss / total_batch_num", "def train(\n self,\n epochs: int = 200,\n batch_size: int = 256,\n latent_dim: int = 100,\n learning_rate: float = 0.001,\n momentum: float = 0.5,\n patience: int = 10,\n resolution: str = \"10min\",\n ):\n self.batch_size = batch_size\n self.latent_dim = latent_dim\n self.learning_rate = learning_rate\n self.momentum = momentum\n self.patience = patience\n optimizer = optimizers.Adam(learning_rate, momentum)\n # Test size is 2 out of 10, valid size is 1 out of remaining 8.\n test_size = 1 / 5\n valid_size = 1 / 8\n train_set, _ = self.dataset.train_test_split(\n test_size=test_size, min_points=10, min_trajectories=10, resolution=resolution\n )\n x, _, _ = self.preprocess(train_set)\n # Train-valid split\n n = x.shape[0]\n idx = np.random.permutation(n)\n valid_n = np.ceil(n * valid_size).astype(int)\n valid_idx, train_idx = idx[:valid_n], idx[valid_n:]\n x_train, x_valid = x[train_idx, :], x[valid_idx, :]\n # build the network\n self.gen, self.dis, self.gan = build_gan(optimizer, self.timesteps, self.vocab_sizes)\n exp_name = f\"{type(self).__name__}_{type(self.dataset).__name__}\"\n hparams = dict(epochs=epochs, batch_size=batch_size, latent_dim=latent_dim)\n start_time = log_start(LOG, exp_name, **hparams, resolution=resolution)\n start_time_str = start_time.strftime(\"%Y-%m-%dT%H:%M:%S\")\n exp_path = Path(f\"experiments/{exp_name}/{start_time_str}\")\n train_model(\n exp_name,\n self.gen,\n self.dis,\n self.gan,\n x_train,\n x_valid,\n self.vocab_sizes,\n patience=patience,\n start_time=start_time_str,\n **hparams,\n )\n self.trained_epochs += epochs\n log_end(LOG, exp_name, start_time)\n self.save(f\"{exp_path}/saved_model\")", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def train(self, sess, observations, action_gradients, is_training=True):\n sess.run(self.optim, {\n self.observations: observations,\n self.action_gradients: action_gradients,\n self.is_training: is_training\n })", "def train(self):\n train_dataloader = self.get_train_dataloader()\n\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)\n num_train_epochs = self.args.num_train_epochs\n\n lr_scheduler = orttrainer.optim.LinearWarmupLRScheduler(t_total, self.args.warmup_steps / 
float(t_total))\n\n loss_scaler = amp.DynamicLossScaler() if self.args.fp16 else None\n device = self.args.device.type\n\n device = f\"{device}:{self.args.device.index}\" if self.args.device.index else f\"{device}:0\"\n options = orttrainer.ORTTrainerOptions(\n {\n \"batch\": {\"gradient_accumulation_steps\": self.args.gradient_accumulation_steps},\n \"device\": {\"id\": device},\n \"mixed_precision\": {\"enabled\": self.args.fp16, \"loss_scaler\": loss_scaler},\n \"debug\": {\n \"deterministic_compute\": True,\n },\n \"utils\": {\"grad_norm_clip\": False},\n \"distributed\": {\n # we are running single node multi gpu test. thus world_rank = local_rank\n # and world_size = self.args.n_gpu\n \"world_rank\": max(0, self.args.local_rank),\n \"world_size\": int(self.world_size),\n \"local_rank\": max(0, self.args.local_rank),\n \"allreduce_post_accumulation\": True,\n },\n \"lr_scheduler\": lr_scheduler,\n }\n )\n\n param_optimizer = list(self.model.named_parameters())\n params = [\n {\n \"params\": [n for n, p in param_optimizer if \"bias\" in n or \"LayerNorm.weight\" in n],\n \"weight_decay_mode\": 1,\n },\n {\n \"params\": [n for n, p in param_optimizer if not (\"bias\" in n or \"LayerNorm.weight\" in n)],\n \"weight_decay_mode\": 1,\n },\n ]\n\n optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True)\n self.model = orttrainer.ORTTrainer(self.model, self.model_desc, optim_config, options=options)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataloader.dataset))\n logger.info(\" Num Epochs = %d\", num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", self.args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n tr_loss = 0.0\n logging_loss = 0.0\n train_iterator = trange(\n epochs_trained,\n int(num_train_epochs),\n desc=\"Epoch\",\n disable=self.args.local_rank not in [-1, 0],\n )\n\n for _epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=self.args.local_rank not in [-1, 0])\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n tr_loss += self._training_step(self.model, inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n len(epoch_iterator) <= self.args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator)\n ):\n global_step += 1\n\n if self.args.local_rank in [-1, 0]:\n if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (\n global_step == 1 and self.args.logging_first_step\n ):\n logs = {}\n if self.args.evaluate_during_training:\n results = self.evaluate()\n for key, value in results.items():\n eval_key = f\"eval_{key}\"\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / self.args.logging_steps\n\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n epoch_iterator.write(json.dumps({**logs, **{\"step\": global_step}}))\n\n if self.args.max_steps > 0 
and global_step > self.args.max_steps:\n epoch_iterator.close()\n break\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n train_iterator.close()\n break\n\n logger.info(\"\\n\\nTraining completed. \\n\\n\")\n return TrainOutput(global_step, tr_loss / global_step)", "def _train(self, dataset):\n net = buildNetwork(\n dataset.params_length,\n self._default_hidden_layers,\n 1 # a binary classifier only requires one output layer\n )\n ds = SupervisedDataSet(dataset)\n trainer = BackpropTrainer(net, ds)\n trainer.trainUntilConvergence()\n net.activate(params.as_serialized)", "def prep_train(lr=0.0002, nz=100):\n\tG,D=build_net(nz=nz)\n\n\tx = T.tensor4('x')\n\tz = T.matrix('z')\n\n\t#Get outputs G(z), D(G(z))) and D(x)\n\tG_z=get_output(G,z)\n\tD_G_z=get_output(D,G_z)\n\tD_x=get_output(D,x)\n\n\t# test samples\n\tsamples=get_output(G,z,deterministic=True)\n\n\t#Get parameters of G and D\n\tparams_d=get_all_params(D, trainable=True)\n\tparams_g=get_all_params(G, trainable=True)\n\n\t#Calc loss and updates\n\tJ_D = bce(D_x,T.ones_like(D_x)).mean() + bce(D_G_z,T.zeros_like(D_G_z)).mean() #mean over a batch\n\tJ_G = bce(D_G_z, T.ones_like(D_G_z)).mean() #mean over a batch (\"stronger gradients in early training\")\n\n\tgrad_d=T.grad(J_D,params_d)\n\tgrad_g=T.grad(J_G,params_g)\n\n\tupdate_D = sgd(grad_d,params_d, learning_rate=lr)\n\tupdate_G = adam(grad_g,params_g, learning_rate=lr)\n\n\t#theano train functions\n\ttrain_fns={}\n\ttrain_fns['gen']=theano.function(inputs=[z], outputs=J_G, updates=update_G)\n\ttrain_fns['dis']=theano.function(inputs=[x,z], outputs=J_D, updates=update_D)\n\n\t#theano test functions\n\ttest_fns={}\n\ttest_fns['sample']=theano.function(inputs=[z],outputs=samples)\n\n\treturn train_fns, test_fns, G, D", "def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch]) #####\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def train(self):\n learning_rate = tf.train.exponential_decay(self.learning_rate, 
self.global_step, self.decay_steps,self.decay_rate, staircase=True)\n self.learning_rate_=learning_rate\n #noise_std_dev = tf.constant(0.3) / (tf.sqrt(tf.cast(tf.constant(1) + self.global_step, tf.float32))) #gradient_noise_scale=noise_std_dev\n train_op = tf_contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,\n learning_rate=learning_rate, optimizer=\"Adam\",clip_gradients=self.clip_gradients)\n return train_op", "def train(model, optimizer: torch.optim, data: torch_geometric.data.Data):\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n model.eval()", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train(self,X, y, learningrate = 0.01):\n \n result = self.forward(X)\n \n if (result != y):\n # need to update weights\n toadd = np.reshape(learningrate * np.append(X, 1), (1025, 1))\n self.NNs[y].weights[0] = np.add(self.NNs[y].weights[0],toadd)\n \n self.NNs[result].weights[0] = np.subtract(self.NNs[result].weights[0],toadd)", "def train_network(self, batch, episode_nr):\n global eps, eps_min, eps_decay\n for exp in batch:\n S = exp[0]\n S = process_state(S)\n action_number = exp[1]\n r = exp[2]\n S_new = exp[3]\n S_new = process_state(S_new)\n terminal = exp[4]\n\n if not terminal: # If agent is not at its final destination\n target = (r + gamma*np.amax(self.target.predict(S_new)[0]))\n else:\n target = r\n target_f = self.policy.predict(S)\n\n target_f[0][action_number] = target # Update something???\n self.policy.fit(S, target_f, epochs=1, verbose=0) # Train network # Verbose - makes training line?\n if self.epsilon > self.eps_min and episode_nr > 10:\n self.epsilon *= self.eps_decay # Decrease exploration rate", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n 
FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model", "def train(self, n_epochs=100, mini_batch_size=1, learning_rate=0.1):\n # theano function parameter representing the index of X\n # where we should start training on.\n index = T.lscalar()\n # theano function variable that represents a batch of examples\n # from X[index] to X[index+batch_size]\n x, y = T.matrices('x', 'y')\n\n # Probability of being target t = sigmoid = 1 / (1 + e^{-(Wx+b)})\n # probability_t = T.nnet.sigmoid(T.dot(x, self.w)+self.b) # matrix of probabilities of size m*t\n probability_t = 1. / (1. 
+ T.exp(-T.dot(x, self.w) + self.b)) # matrix of probabilities of size m*t\n\n # compare matrix of probabilities to the true labels matrix Y of values 0 or 1\n cost = T.mean(T.nnet.categorical_crossentropy(probability_t, y))\n\n params = [self.w, self.b] # parameters to optimize\n g_params = T.grad(cost=cost, wrt=params) # gradient with respect to W and b\n\n # update W and b like so: param = param - lr*gradient\n updates = []\n for param, g_param in zip(params, g_params):\n updates.append((param, param - learning_rate * g_param))\n\n # train function: (index -> cost) with x=X[i:i+mini_batch] & y=Y[i:i+mini_batch]\n train = theano.function(\n inputs=[index],\n outputs=[cost],\n updates=updates,\n givens={x: self.x_train[index:index + mini_batch_size],\n y: self.y_train[index:index + mini_batch_size]\n }\n )\n\n import time\n start_time = time.clock()\n for epoch in xrange(n_epochs): # xrange ~ range but doesn't create a list! (faster and less memory used)\n print \"Epoch:\", epoch\n current_cost = 0\n # train from 0 to number of examples (m), by skipping batch.\n for row in xrange(0, self.m, mini_batch_size):\n current_cost = train(row)[0]\n print \"cost =\", current_cost\n end_time = time.clock()\n print \"Average time per epoch = \", (end_time - start_time) / n_epochs", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def trainData(self, X, y, NeuralNet, epochs):", "def train(self, X, T):\r\n assert len(self.neurons) > 0, \"Add neurons before training ELM\"\r\n X, T = self._checkdata(X, T)\r\n H = self.project(X)\r\n self.Beta = np.linalg.pinv(H).dot(T)", "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in 
range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 
= relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def train(self, n_iterations, test_samples, test_labels, training_samples, training_labels):\n\n # Keep track of the running time for training the neural network\n start_time_network = time.time()\n\n # Train the neural network with the defined number of iterations on the training data batches\n all_training_loss = []\n all_test_loss = []\n for iteration in tqdm(range(n_iterations+1)):\n # The dataset create automatic batches, so there is no need to define the samples\n self.session.run(self.optimizer)\n # # Keep track of the training and test loss\n # training_loss = self.session.run(tf.reduce_mean(self.cost),\n # feed_dict={\"Input/BatchSamples:0\": training_samples,\n # 
\"Input/BatchLabels:0\": training_labels})\n # test_loss = self.session.run(tf.reduce_mean(self.cost), feed_dict={\"Input/BatchSamples:0\": test_samples,\n # \"Input/BatchLabels:0\": test_labels})\n # # Store the loss in percentages\n # all_training_loss.append(training_loss*100)\n # all_test_loss.append(test_loss*100)\n\n # Check for every 100th iteration the loss\n if iteration % 1000 == 0:\n training_cost = self.session.run(tf.reduce_mean(self.cost))\n print(\"STEP {} | Training cost: {:.4f}\".format(iteration, training_cost*100))\n test_accuracy = self.evaluate(evaluation_samples=test_samples, evaluation_labels=test_labels)\n print(\"\\t\\t Test accuracy: {:.2f}%\".format(test_accuracy[1]))\n\n # Get the total running time of the neural network\n network_run_time = time.time() - start_time_network\n\n return network_run_time, all_training_loss, all_test_loss", "def train(self, X_train, Y_train, X_test = None, Y_test = None, epochs = 100, batch_size = 32, learning_rate = 0.005):\n m_train = X_train.shape[1]\n for epoch in range(epochs + 1):\n batch = np.arange(0, m_train)\n np.random.shuffle(batch)\n for k in range(m_train // batch_size + 1):\n if k * batch_size < m_train:\n X_mini_batch = X_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n Y_mini_batch = Y_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n self.update_weights(X_mini_batch, Y_mini_batch, learning_rate)\n \n if epoch % 10 == 0: \n # Loss function\n A2 = self.feedforward(X_train)\n cost = (1 / m_train) * np.sum(-np.multiply(Y_train, np.log(A2)) - np.multiply(1 - Y_train, np.log(1 - A2)))\n print(f\"epoch:{epoch}, Cost: {cost}, \", end = '')\n # Accutacy on training data\n if X_test is not None and Y_test is not None:\n A2_test = self.feedforward(X_test)\n class_pred = A2_test.argmax(axis = 0)\n class_actual = Y_test.argmax(axis = 0)\n acc = sum(class_actual == class_pred)\n print(f\"accuracy:{acc}/{X_test.shape[1]}\")", "def TrainNetwork(self):\n\n self.logger.info('Train Network')\n self.netWork.TrainGenerator()\n\n # # train NetworkLSTM\n self.logger.info('Train NetworkLSTM')\n self.netWork.TrainLSTM()", "def train_step(optimizer,\n inputs,\n outputs,\n programs,\n learning_rate_fn,\n config,\n train_rng=None):\n # We handle PRNG splitting inside the top pmap, rather\n # than handling it outside in the training loop - doing the\n # latter can add some stalls to the devices.\n train_rng, new_train_rng = jax.random.split(train_rng)\n\n weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)\n\n def loss_fn(params):\n \"\"\"Loss function used for training.\"\"\"\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits\n\n step = optimizer.state.step\n lr = learning_rate_fn(step)\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (_, logits), grad = grad_fn(optimizer.target)\n grad = jax.lax.pmean(grad, 'batch')\n new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n\n # Get metrics.\n metrics = compute_metrics(logits, programs, weights)\n metrics['learning_rate'] = lr\n return new_optimizer, metrics, new_train_rng", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = 
np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. \n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. \n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def prep_train(lr=0.0002, nz=100):\n\tG,D=build_net(nz=nz)\n\n\tx = T.tensor4('x')\n\tz = T.matrix('z')\n\n\t#Get outputs G(z), D(G(z))) and D(x)\n\tG_z=get_output(G,z)\n\tD_G_z=get_output(D,G_z)\n\tD_x=get_output(D,x)\n\n\t# test samples\n\tsamples=get_output(G,z,deterministic=True)\n\n\tprint 'get all params'\n\t#Get parameters of G and D\n\tparams_d=get_all_params(D, trainable=True)\n\tparams_g=get_all_params(G, trainable=True)\n\n # compute cost\n\tloss_d = - (D_x.mean() - D_G_z.mean()) \n\tloss_g = - D_G_z.mean() \n\n # update \n\tupdate_d = rmsprop(loss_d,params_d,learning_rate = lr) \n\tupdate_g = rmsprop(loss_g,params_g,learning_rate = lr) \n\n\tprint 'getting weights'\n # clip all the weights # W=GlorotUniform() in dis - first conv layer\n\tparams=get_all_param_values(D, trainable=True)\n\tw_clip= [np.clip(w, -0.01, 0.01) for w in params]\n\tset_all_param_values(D,w_clip, trainable=True)\n\n\tprint 'obtained weights'\n\n\t#theano train functions\n\ttrain_fns={}\n\ttrain_fns['gen']=theano.function(inputs=[z], 
outputs=loss_g, updates=update_g)\n\ttrain_fns['dis']=theano.function(inputs=[x,z], outputs=loss_d, updates=update_d)\n\n\tprint 'train_fns'\n\n\t#theano test functions\n\ttest_fns={}\n\ttest_fns['sample']=theano.function(inputs=[z],outputs=samples)\n\n\treturn train_fns, test_fns, G, D", "def train(net, optimizer):\n global back_loss\n\n # BACKPROPAGATE\n start_backprop_time = time.time()\n\n optimizer.zero_grad()\n back_loss.backward() # calculate grad\n optimizer.step() # update rewards\n\n backprop_time = time.time() - start_backprop_time\n\n # CONSTRAIN -- clamp minimum rewards at 0, then normalize using L1 norm\n constrain_time = time.time()\n net.R.data = torchfun.normalize(net.R.data.clamp(min=0), p=1, dim=0) * R_TOTAL\n constrain_time = time.time() - constrain_time\n\n # FORWARD\n forward_time = go_forward(net)\n\n return (backprop_time + constrain_time + forward_time, constrain_time)", "def train(self, network, training_examples, iterations, unsupervised=False):\n if unsupervised:\n # For now this means we are training a sparse autoencoder.\n # Therefore, we need to keep a running estimate of the\n # \"sparsity\" of a node, where we try to keep the activation\n # of the node stay close to a small value near 0 known as\n # rho (Greek lower case p) or the 'sparsity parameter',\n # which we will set to 0.05.\n # This forces the network to learn the smallest set of features\n # necessary to accurately build a close estimate of the original\n # input vector\n # In this case, we set the input vector equal to the target vector,\n # and usually set a smaller value for the number of hidden nodes\n # Then perform normal backpropagation, and during that, for each\n # hidden node, also update the rho_estimate, and then update the\n # threshold value\n rho = 0.05\n rho_estimates = [0] * len(network.layers[0].neurons) # set to 0 for each node\n beta = 0.2 # the learning rate for updating the threshold terms\n for iteration_counter in range(iterations):\n random.shuffle(training_examples)\n # for each row of data\n for training_example in training_examples:\n input_vector = training_example[0]\n target_output_vector = training_example[1]\n # prime the network on this row of input data\n # -this will cause output (activation) values to be\n # set for each neuron\n network.forward(input_vector)\n\n # Note: next_layer_deltas is a vector of the single\n # delta values for each node in the next\n # (forward) layer\n next_layer_deltas = []\n next_layer_weights = []\n isOutputLayer = True\n for layer in reversed(network.layers): # iterate backwards\n this_layer_deltas = [] # values from current layer\n this_layer_weights = []\n for j, neuron in enumerate(layer.neurons):\n derivative = neuron.activation_function.derivative\n # The output layer neurons are treated slightly\n # different than the hidden neurons\n if isOutputLayer:\n if neuron.activation_function.name == \"logistic\":\n # derivative simplifies down to just\n # subtracting the target from the\n # hypothesis\n delta = neuron.output - target_output_vector[j]\n else: # Tanh or Linear\n delta = (neuron.output-target_output_vector[j])*derivative(neuron.output)\n else: # for the hidden layer neurons\n # Need to sum the products of the delta of\n # a neuron in the next (forward) layer and the\n # weight associated with the connection between\n # this hidden layer neuron and that neuron.\n # This will basically determine how much this\n # neuron contributed to the error of the neuron\n # it is connected to\n # Note: next_layer_deltas is a vector 
of the \n # single delta values for each node in the next\n # (forward) layer\n sum_value = 0.0\n for next_delta, weights in zip(next_layer_deltas,\n next_layer_weights):\n sum_value += weights[j] * next_delta\n delta = (derivative(neuron.output) *\n sum_value)\n # now store the delta and the list of weights\n # for this neuron into these storage lists for the\n # whole layer\n this_layer_deltas.append(delta)\n this_layer_weights.append(neuron.weights)\n # Now, compute the gradient (partial deriv of cost\n # func, J, w/ respect to parameter ij) for each\n # weight_ij (parameter_ij) associated with\n # this neuron\n for ij, input_ij in enumerate(neuron.input_vector):\n # compute gradient (partial deriv of cost J w/\n # respect to parameter ij)\n # Note: index ij means from a previous\n # layer node i to this layer node j\n # Then Gradient Descent: multiply by the learning\n # rate, and subtract from the current value\n # Note: Subtract in order to minimize error, since\n # partial derivs point in direction of gradient\n # AScent\n gradient_ij = delta * input_ij\n neuron.weights[ij] -= self.learning_rate * gradient_ij\n # Now, compute the gradient (partial deriv of cost\n # func, J, with respect to parameter ij) for the\n # threshold value (parameter_0j), by using a \"1\" as\n # the threshold \"input value\"\n # -Note: index 0j means from a previous\n # layer threshold node 0 (threshold always has\n # index i=0) to this layer node j\n # -can also think of it as the threshold being\n # internal to this neuron\n gradient_0j = delta * 1\n neuron.threshold -= self.learning_rate * gradient_0j\n if unsupervised and not isOutputLayer:\n rho_estimates[j] = (0.999*rho_estimates[j] +\n 0.001*neuron.output)\n neuron.threshold -= (self.learning_rate * beta *\n (rho_estimates[j] - rho))\n # Once this layer is done, store the gradients and weights\n # from the current layer for the next layer iteration\n # (moving backwards)\n next_layer_deltas = this_layer_deltas\n next_layer_weights = this_layer_weights\n isOutputLayer = False\n # Note: this is after the while loop\n self.iterations = iteration_counter", "def train(self, data_set, epochs, verbose=False, random_seed=None):\r\n if random_seed is not None:\r\n seed(random_seed)\r\n\r\n for i in range(epochs):\r\n print(i)\r\n shuffle(data_set)\r\n\r\n #check if training is done\r\n \"\"\"\r\n if self.test(data_set) == 1.0:\r\n if verbose:\r\n print(\"\\nTraining converged on epoch\", i)\r\n return\r\n \"\"\"\r\n #loop through each vector/ layer\r\n for input, target_output in data_set:\r\n\r\n output = self.predict(input)\r\n\r\n self.backpropagation(target_output)", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):\n # 加入学习曲线显示\n weight_url = '//home//jim//shanghai_index//data//weights.params'\n logdir_url = '//home//jim//shanghai_index//log'\n sw = SummaryWriter(logdir = logdir_url, flush_secs=10)\n # 提取已有参数\n if os.path.exists(weight_url):\n print(u'已含有旧权重文件,正在载入继续训练并更新')\n net.load_parameters(weight_url, allow_missing = True, ignore_extra = True)\n # 训练\n for epoch in range(num_epochs):\n train_loss, train_acc, train_step, start = 0.0, 0.0, 0, time.time()\n for x1, x2, y in train_iter:\n batch_size = x1.shape[0]\n with autograd.record():\n y_hats = net(x1, x2)\n print('y_hat = {}'.format(y_hats))\n print('y = {}'.format(y))\n ls = loss(y_hats, y)\n print('loss:{}'.format(ls))\n ls.backward()\n 
trainer.step(batch_size)\n train_loss += np.mean(ls.asnumpy())\n train_acc += acc(y_hats, y)\n train_step += 1\n \n print('epoch {}, loss {}, train acc {}, time {} sec'.format(epoch + 1,\n train_loss/train_step,\n train_acc/train_step,\n time.time() - start))\n # 向tensorboard填数据\n sw.add_scalar(tag = 'Loss_and_acc', \\\n value = {'train_loss': train_loss/train_step, 'train_acc': train_acc/train_step}, \\\n global_step = epoch)\n # 加入某个层权重分布变化等高图\n grads = [i.grad() for i in net.collect_params('.*weight|.*bias').values()]\n param_names = [name for name in net.collect_params('.*weight|.*bias').keys()]\n assert len(grads) == len(param_names)\n # logging the gradients of parameters for checking convergence\n for i, name in enumerate(param_names):\n sw.add_histogram(tag = name, values = grads[i], global_step = epoch, bins = 20)\n\n # 加入保存参数\n net.save_parameters(weight_url)", "def train(nz=100, lr=0.0002, batchSize=64, epoch=10, outDir='../Experiment/wgan'):\n\t# load the images for training\n\tif opts.celeba : \n\t\txTrain = load_CelebA()\n\tif opts.mnist : \n\t\txTrain,_,_,_,_,_ = load_MNIST()\n\tprint 'Images for training -- shape:{}, min:{}, max:{} '.format(np.shape(xTrain), np.min(xTrain), np.max(xTrain))\n\n\ttrain_fns, test_fns, G, D = prep_train(nz=nz, lr=lr)\n\n\tsn,sc,sx,sy=np.shape(xTrain)\n\tbatches=int(np.floor(float(sn)/batchSize))\n\n\t#keep training info\n\tg_cost=[]\n\td_cost=[]\n\n\ttimer=time.time()\n\t#Train D (outerloop)\n\tprint 'epoch \\t batch \\t cost G \\t cost D \\t time (s)'\n\tfor e in range(epoch):\n\t\t#Do for all batches\n\t\tfor b in range(batches):\n\t\t\tZ = np.random.normal(loc=0.0, scale=1.0, size=(sn,nz)).astype(floatX) \n\t\t\tcost_D=train_fns['dis'](xTrain[b*batchSize:(b+1)*batchSize],Z[b*batchSize:(b+1)*batchSize])\n\t\t\tcost_G=train_fns['gen'](Z[b*batchSize:(b+1)*batchSize])\n\t\t\tprint e,'\\t',b,'\\t',cost_G,'\\t', cost_D,'\\t', time.time()-timer\n\t\t\ttimer=time.time()\n\t\t\tg_cost.append(cost_G)\n\t\t\td_cost.append(cost_D)\n\n\n\t#save plot of the cost\n\tplt.plot(range(batches*epoch),g_cost, label=\"G\")\n\tplt.plot(range(batches*epoch),d_cost, label=\"D\")\n\tplt.legend()\n\tplt.xlabel('epoch')\n\tplt.savefig(os.path.join(outDir,'cost_regular.png'))\n\n\treturn train_fns, test_fns, G, D", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.setWeights(trainingData.shape[1])\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n \n # Hyper-parameters. Your can reset them. 
Default batchSize = 100, weight_decay = 1e-3, learningRate = 1e-2\n \"*** YOU CODE HERE ***\"\n self.batchSize = 100\n self.weight_decay = 1e-3\n self.learningRate = 0.1\n\n def Softmax(x):\n x_max = np.max(x, axis=0)\n x_exp = np.exp(x - x_max)\n x_exp_sum = np.sum(x_exp, axis=0)\n return x_exp / x_exp_sum\n\n for iteration in range(self.max_iterations):\n if iteration % 10 == 0:\n print(\"Starting iteration \", iteration, \"...\")\n self.learningRate *= 0.9\n dataBatches = self.prepareDataBatches(trainingData, trainingLabels)\n for batchData, batchLabel in dataBatches:\n \"*** YOUR CODE HERE ***\"\n Y = np.zeros((len(self.legalLabels), self.batchSize))\n for i in range(self.batchSize):\n Y[batchLabel[i]][i] = 1\n Y_pred = Softmax((batchData @ self.weights + self.bias).T)\n d_weight = ((Y_pred - Y) @ batchData / batchData.shape[0]).T + self.weight_decay * sum(self.weights)\n d_bias = np.mean(Y_pred - Y, axis=1) + self.weight_decay * sum(self.bias)\n self.weights -= d_weight * self.learningRate\n self.bias -= d_bias * self.learningRate", "def train(data_dir, epochs, batch_size, lr, optimizer, categories):\n run = Run.get_context()\n\n run.log('optimizer', optimizer)\n run.log('minibatch_size', batch_size)\n run.log('learning_rate', lr)\n run.log('categories', categories)\n\n # Get model and data objects\n train_generator, validation_generator = create_dataset_generators(\n data_dir, batch_size, categories\n )\n \n model = create_model(lr=lr, classes=train_generator.num_classes, optimizer_name=optimizer)\n print(model.optimizer)\n \n os.makedirs(\"./outputs\", exist_ok=True)\n\n with open('./outputs/labels.json', 'w') as fo:\n json.dump(train_generator.class_indices, fo)\n\n aml_callback = AzureMLCallback(run)\n checkpointer = ModelCheckpoint(\n filepath=\"./outputs/weights_{epoch:02d}.hdf5\", period=25)\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=train_generator.samples / batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples / batch_size,\n verbose=2,\n callbacks=[aml_callback, checkpointer],\n )\n\n model.save(\"./outputs/final_model.hdf5\")", "def train_func(self, data):\n self.net.train()\n\n outputs, losses = self.forward(data)\n\n self.update_network(losses)\n self.record_losses(losses, 'train')\n\n return outputs, losses", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def train_and_predict(self, X, y, num_epochs, experimental,\n initial_learning_rate,\n optimizer_name,\n log_every_n_iter,\n results_dir,\n checkpoint_dir,\n experiment_id,\n summary_dir,\n save_checkpoint_steps,\n save_summaries_steps,\n alternate_optimization,\n gcn_opt_steps,\n adj_opt_steps):\n\n tf.summary.scalar(\"accuracy/train\", self.accuracy_train)\n tf.summary.scalar(\"accuracy/val\", self.accuracy_val)\n tf.summary.scalar(\"accuracy/test\", self.accuracy_test)\n tf.summary.scalar(\"mnlp/train\", self.mnlp_train)\n tf.summary.scalar(\"mnlp/val\", self.mnlp_val)\n tf.summary.scalar(\"mnlp/test\", self.mnlp_test)\n\n 
tf.summary.scalar(\"loss/elbo/train\", tf.reduce_sum(self.elbo_train))\n tf.summary.scalar(\"loss/elbo/val\", tf.reduce_sum(self.elbo_val))\n tf.summary.scalar(\"loss/elbo/test\", tf.reduce_sum(self.elbo_test))\n\n tf.summary.scalar(\"loss/train\", self.loss_train)\n tf.summary.scalar(\"loss/val\", self.loss_val)\n tf.summary.scalar(\"loss/test\", self.loss_test)\n\n tf.summary.scalar(\"loss/kl_train\", self.kl)\n tf.summary.scalar(\"loss/ell_train\", self.ell_train)\n tf.summary.scalar(\"loss/reg_train\", self.reg)\n\n global_step = tf.train.get_or_create_global_step()\n\n if experimental:\n\n train_op = tf.contrib.layers.optimize_loss(\n self.loss_train,\n global_step=global_step,\n learning_rate=initial_learning_rate,\n optimizer=optimizer_name,\n summaries=[\"gradients\"],\n )\n\n train_op_gcn = tf.contrib.layers.optimize_loss(self.loss_train,\n global_step=global_step,\n learning_rate=initial_learning_rate,\n optimizer=optimizer_name,\n summaries=[\"gradients\"],\n variables=self.gcn.trainable_weights\n )\n\n train_op_adj = tf.contrib.layers.optimize_loss(self.loss_train,\n global_step=global_step,\n learning_rate=initial_learning_rate,\n optimizer=optimizer_name,\n summaries=[\"gradients\"],\n variables=self.posterior_param\n )\n\n else:\n\n if optimizer_name != \"Adam\":\n warnings.warn(\n (\n \"Optimizer '{}' only available in experimental mode. \"\n \"Defaulting to 'Adam'.\"\n ).format(optimizer_name)\n )\n\n optimizer = tf.train.AdamOptimizer(learning_rate=initial_learning_rate)\n train_op = optimizer.minimize(self.loss_train, global_step=global_step)\n\n train_op_gcn = optimizer.minimize(self.loss_train, global_step=global_step,\n var_list=self.gcn.trainable_weights)\n train_op_adj = optimizer.minimize(self.loss_train, global_step=global_step,\n var_list=self.posterior_param)\n\n if checkpoint_dir is not None:\n checkpoint_dir = os.path.join(checkpoint_dir, experiment_id)\n\n if results_dir is None:\n results_dir = checkpoint_dir\n results_dir = os.path.join(results_dir, experiment_id)\n\n header = (\n \"time, epoch, loss_train, loss_val, loss_test, accuracy_train, accuracy_val, accuracy_test, \"\n + \"kl_train, ell_train, reg_train, mnlp_train, mnlp_val, mnlp_test\"\n )\n\n results_filename = get_results_handler(results_dir, header)\n\n # global_step == epoch since each step is full pass over all data\n logger = tf.train.LoggingTensorHook(\n dict(\n epoch=global_step,\n loss_train=self.loss_train,\n loss_val=self.loss_val,\n loss_test=self.loss_test,\n accuracy_train=self.accuracy_train,\n accuracy_val=self.accuracy_val,\n accuracy_test=self.accuracy_test,\n kl_train=self.kl,\n ell_train=self.ell_train,\n reg_train=self.reg,\n mnlp_train=self.mnlp_train,\n mnlp_val=self.mnlp_val,\n mnlp_test=self.mnlp_test,\n learning_phase=K.learning_phase(),\n ),\n every_n_iter=log_every_n_iter,\n formatter=lambda tensors: (\n \"epoch={epoch:04d}, \"\n \"loss={loss_train:04f}, \"\n \"loss_val={loss_val:04f}, \"\n \"loss_test={loss_test:04f}, \"\n \"acc={accuracy_train:04f}, \"\n \"acc_val={accuracy_val:04f}, \"\n \"acc_test={accuracy_test:04f}, \"\n \"kl_train={kl_train:04f}, \"\n \"ell_train={ell_train:04f}, \"\n \"reg_train={reg_train:04f}, \"\n \"mnlp_train={mnlp_train:04f}, \"\n \"mnlp_val={mnlp_val:04f}, \"\n \"mnlp_test={mnlp_test:04f}, \"\n \"learning_phase={learning_phase}\"\n ).format(**tensors),\n )\n\n no_op = tf.no_op()\n\n metrics_list = [self.loss_train, self.loss_val, self.loss_test,\n self.accuracy_train, self.accuracy_val, self.accuracy_test,\n self.kl, self.ell_train, 
self.reg,\n self.mnlp_train, self.mnlp_val, self.mnlp_test]\n\n final_metrics_list = [self.loss_train, self.loss_val, self.loss_test,\n self.accuracy_train, self.accuracy_val, self.accuracy_test,\n self.mnlp_train, self.mnlp_val, self.mnlp_test]\n\n with tf.train.MonitoredTrainingSession(\n hooks=[logger],\n checkpoint_dir=checkpoint_dir,\n summary_dir=checkpoint_dir if summary_dir is None else summary_dir,\n save_checkpoint_steps=save_checkpoint_steps,\n save_summaries_steps=save_summaries_steps,\n ) as sess:\n if alternate_optimization is True:\n epoch = 0\n while epoch < num_epochs:\n gcn_step = 0\n adj_step = 0\n while gcn_step < gcn_opt_steps and epoch < num_epochs:\n sess.run(train_op_gcn, feed_dict={self.x: X, K.learning_phase(): True})\n get_intermediate_results(sess, metrics_list, self.x, X, epoch, results_filename)\n gcn_step += 1\n epoch += 1\n while adj_step < adj_opt_steps and epoch < num_epochs:\n get_intermediate_results(sess, metrics_list, self.x, X, epoch, results_filename)\n sess.run(train_op_adj, feed_dict={self.x: X, K.learning_phase(): True})\n adj_step += 1\n epoch += 1\n else:\n if checkpoint_dir is not None: # saves initial posterior\n save_posterior(sess, self.probs_tril, self.posterior_param, self.x, X, checkpoint_dir)\n for epoch in range(num_epochs):\n sess.run(train_op, feed_dict={self.x: X, K.learning_phase(): True})\n get_intermediate_results(sess, metrics_list, self.x, X, epoch, results_filename)\n\n get_final_results(sess, no_op, self.x, X, final_metrics_list, self.y_pred, y, self.mask_test, results_dir)", "def train(self, X_train, y_train, batch_size, num_epochs, alpha=0.8):\r\n batch_split = tf.data.Dataset.from_tensor_slices((X_train, y_train))\r\n batch_split = batch_split.batch(batch_size)\r\n for epoch in range(num_epochs):\r\n for step, (i,j) in enumerate(batch_split):\r\n with tf.GradientTape() as tape:\r\n predictions = self.predict(i)\r\n loss = self.calculate_loss(j, predictions)\r\n dloss_dw, dloss_db = tape.gradient(loss, [self.weights, self.biases])\r\n for k in range(len(self.weights)):\r\n self.weights[k].assign_sub(alpha * dloss_dw[k])\r\n self.biases[k].assign_sub(alpha * dloss_db[k])", "def train(self):\n # The number of iterations per epoch\n self.iters_per_epoch = len(self.data_loader_train)\n # Start with trained model if exists\n g_lr = self.g_lr\n d_lr = self.d_lr\n if self.checkpoint:\n start = int(self.checkpoint.split('_')[0])\n else:\n start = 0\n # Start training\n self.start_time = time.time()\n for self.e in range(start, self.num_epochs):\n for self.i, (img_A, img_B, _, _) in enumerate(self.data_loader_train):\n # Convert tensor to variable\n org_A = self.to_var(img_A, requires_grad=False)\n ref_B = self.to_var(img_B, requires_grad=False)\n\n # ================== Train D ================== #\n # training D_A\n # Real\n out = self.D_A(ref_B)\n d_loss_real = self.criterionGAN(out, True)\n # Fake\n fake = self.G_A(org_A)\n fake = Variable(fake.data)\n fake = fake.detach()\n out = self.D_A(fake)\n #d_loss_fake = self.get_D_loss(out, \"fake\")\n d_loss_fake = self.criterionGAN(out, False)\n \n # Backward + Optimize\n d_loss = (d_loss_real + d_loss_fake) * 0.5\n self.d_A_optimizer.zero_grad()\n d_loss.backward(retain_graph=True)\n self.d_A_optimizer.step()\n\n # Logging\n self.loss = {}\n self.loss['D-A-loss_real'] = d_loss_real.item()\n\n # training D_B\n # Real\n out = self.D_B(org_A)\n d_loss_real = self.criterionGAN(out, True)\n # Fake\n fake = self.G_B(ref_B)\n fake = Variable(fake.data)\n fake = fake.detach()\n out = 
self.D_B(fake)\n #d_loss_fake = self.get_D_loss(out, \"fake\")\n d_loss_fake = self.criterionGAN(out, False)\n \n # Backward + Optimize\n d_loss = (d_loss_real + d_loss_fake) * 0.5\n self.d_B_optimizer.zero_grad()\n d_loss.backward(retain_graph=True)\n self.d_B_optimizer.step()\n\n # Logging\n self.loss['D-B-loss_real'] = d_loss_real.item()\n\n # ================== Train G ================== #\n if (self.i + 1) % self.ndis == 0:\n # adversarial loss, i.e. L_trans,v in the paper \n\n # identity loss\n if self.lambda_idt > 0:\n # G_A should be identity if ref_B is fed\n idt_A = self.G_A(ref_B)\n loss_idt_A = self.criterionL1(idt_A, ref_B) * self.lambda_B * self.lambda_idt\n # G_B should be identity if org_A is fed\n idt_B = self.G_B(org_A)\n loss_idt_B = self.criterionL1(idt_B, org_A) * self.lambda_A * self.lambda_idt\n g_loss_idt = loss_idt_A + loss_idt_B\n else:\n g_loss_idt = 0\n \n # GAN loss D_A(G_A(A))\n fake_B = self.G_A(org_A)\n pred_fake = self.D_A(fake_B)\n g_A_loss_adv = self.criterionGAN(pred_fake, True)\n #g_loss_adv = self.get_G_loss(out)\n\n # GAN loss D_B(G_B(B))\n fake_A = self.G_B(ref_B)\n pred_fake = self.D_B(fake_A)\n g_B_loss_adv = self.criterionGAN(pred_fake, True)\n\n # Forward cycle loss\n rec_A = self.G_B(fake_B)\n g_loss_rec_A = self.criterionL1(rec_A, org_A) * self.lambda_A\n\n # Backward cycle loss\n rec_B = self.G_A(fake_A)\n g_loss_rec_B = self.criterionL1(rec_B, ref_B) * self.lambda_B\n\n # Combined loss\n g_loss = g_A_loss_adv + g_B_loss_adv + g_loss_rec_A + g_loss_rec_B + g_loss_idt\n \n self.g_optimizer.zero_grad()\n g_loss.backward(retain_graph=True)\n self.g_optimizer.step()\n\n # Logging\n self.loss['G-A-loss_adv'] = g_A_loss_adv.item()\n self.loss['G-B-loss_adv'] = g_A_loss_adv.item()\n self.loss['G-loss_org'] = g_loss_rec_A.item()\n self.loss['G-loss_ref'] = g_loss_rec_B.item()\n self.loss['G-loss_idt'] = g_loss_idt.item()\n\n # Print out log info\n if (self.i + 1) % self.log_step == 0:\n self.log_terminal()\n\n #plot the figures\n for key_now in self.loss.keys():\n plot_fig.plot(key_now, self.loss[key_now])\n\n #save the images\n if (self.i + 1) % self.vis_step == 0:\n print(\"Saving middle output...\")\n self.vis_train([org_A, ref_B, fake_A, fake_B, rec_A, rec_B])\n self.vis_test()\n\n # Save model checkpoints\n if (self.i + 1) % self.snapshot_step == 0:\n self.save_models()\n\n if (self.i % 100 == 99):\n plot_fig.flush(self.task_name)\n\n plot_fig.tick()\n \n # Decay learning rate\n if (self.e+1) > (self.num_epochs - self.num_epochs_decay):\n g_lr -= (self.g_lr / float(self.num_epochs_decay))\n d_lr -= (self.d_lr / float(self.num_epochs_decay))\n self.update_lr(g_lr, d_lr)\n print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(g_lr, d_lr))", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n 
values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def train(self, inputs, desired):\n inputs.append(1) # bias input\n guess = self.feedforward(inputs)\n error = desired - guess\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] + \\\n self.learning_rate * error * inputs[i]", "def setup(\n self,\n dim_data: int,\n neural_net: ModelBase,\n optimizer: optax.OptState,\n ):\n # neural network\n self.state_neural_net = neural_net.create_train_state(\n self.rng, optimizer, dim_data\n )\n\n # step function\n self.step_fn = self._get_step_fn()", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def train(self, *args, **kwargs):\n # Handle overload of train() method\n if len(args) < 1 or (len(args) == 1 and type(args[0]) == bool):\n return nn.Sequential.train(self, *args, **kwargs)\n\n #\n # Parse training arguments\n #\n\n training_data = args[0]\n arguments = {\n \"validation_data\": None,\n \"batch_size\": 256,\n \"sigma_noise\": None,\n \"adversarial_training\": False,\n \"delta_at\": 0.01,\n \"initial_learning_rate\": 1e-2,\n \"momentum\": 0.0,\n \"convergence_epochs\": 5,\n \"learning_rate_decay\": 2.0,\n 
\"learning_rate_minimum\": 1e-6,\n \"maximum_epochs\": 1,\n \"training_split\": 0.9,\n \"gpu\": False,\n \"optimizer\": None,\n \"learning_rate_scheduler\": None\n }\n argument_names = arguments.keys()\n for a, n in zip(args[1:], argument_names):\n arguments[n] = a\n for k in kwargs:\n if k in arguments:\n arguments[k] = kwargs[k]\n else:\n raise ValueError(\"Unknown argument to {}.\".print(k))\n\n validation_data = arguments[\"validation_data\"]\n batch_size = arguments[\"batch_size\"]\n sigma_noise = arguments[\"sigma_noise\"]\n adversarial_training = arguments[\"adversarial_training\"]\n delta_at = arguments[\"delta_at\"]\n initial_learning_rate = arguments[\"initial_learning_rate\"]\n convergence_epochs = arguments[\"convergence_epochs\"]\n learning_rate_decay = arguments[\"learning_rate_decay\"]\n learning_rate_minimum = arguments[\"learning_rate_minimum\"]\n maximum_epochs = arguments[\"maximum_epochs\"]\n training_split = arguments[\"training_split\"]\n gpu = arguments[\"gpu\"]\n momentum = arguments[\"momentum\"]\n optimizer = arguments[\"optimizer\"]\n learning_rate_scheduler = arguments[\"learning_rate_scheduler\"]\n\n #\n # Determine device to use\n #\n if torch.cuda.is_available() and gpu:\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n self.to(device)\n\n #\n # Handle input data\n #\n try:\n x, y = handle_input(training_data, device)\n training_data = BatchedDataset((x, y), batch_size)\n except:\n pass\n\n self.train()\n if not optimizer:\n self.optimizer = optim.SGD(\n self.parameters(), lr=initial_learning_rate, momentum=momentum\n )\n else:\n self.optimizer = optimizer\n self.criterion.to(device)\n\n if not optimizer and not learning_rate_scheduler:\n scheduler = ReduceLROnPlateau(\n self.optimizer,\n factor=1.0 / learning_rate_decay,\n patience=convergence_epochs,\n min_lr=learning_rate_minimum,\n )\n else:\n scheduler = learning_rate_scheduler\n\n training_errors = []\n validation_errors = []\n\n #\n # Training loop\n #\n\n for i in range(maximum_epochs):\n err = 0.0\n n = 0\n for j, (x, y) in enumerate(training_data):\n\n x = x.to(device)\n y = y.to(device)\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n self.optimizer.zero_grad()\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n err += c.item() * x.size()[0]\n n += x.size()[0]\n\n if adversarial_training:\n self.optimizer.zero_grad()\n x_adv = self._make_adversarial_samples(x, y, delta_at)\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n if j % 100:\n print(\n \"Epoch {} / {}: Batch {} / {}, Training error: {:.3f}\".format(\n i, maximum_epochs, j, len(training_data), err / n\n ),\n end=\"\\r\",\n )\n\n # Save training error\n training_errors.append(err / n)\n\n lr = [group[\"lr\"] for group in self.optimizer.param_groups][0]\n\n val_err = 0.0\n if not validation_data is None:\n n = 0\n for x, y in validation_data:\n x = x.to(device).detach()\n y = y.to(device).detach()\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n\n val_err += c.item() * x.size()[0]\n n += x.size()[0]\n validation_errors.append(val_err / n)\n\n print(\n \"Epoch {} / {}: Training error: {:.3f}, Validation error: {:.3f}, Learning rate: {:.5f}\".format(\n i,\n maximum_epochs,\n training_errors[-1],\n validation_errors[-1],\n lr,\n )\n )\n if scheduler:\n scheduler.step()\n else:\n scheduler.step()\n print(\n \"Epoch 
{} / {}: Training error: {:.3f}, Learning rate: {:.5f}\".format(\n i, maximum_epochs, training_errors[-1], lr\n )\n )\n\n self.training_errors += training_errors\n self.validation_errors += validation_errors\n self.eval()\n return {\n \"training_errors\": self.training_errors,\n \"validation_errors\": self.validation_errors,\n }", "def train(self, _inputs, label, learning_rate):\n\n current_guess = self.predict(_inputs)\n err = label - current_guess\n\n for i in range(self.input_size):\n self.weights[i] += err*_inputs[i]*learning_rate", "def train(self, train_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n val_set_x = val_set[:,:-2]\n val_set_u = val_set[:,-2]\n val_set_y = val_set[:,-1]\n # val_set_x, val_set_y = shared_dataset(val_set[:,:-1], val_set[:,-1])\n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n \n \n # compile Theano functions to get train/val/test errors\n \n # val_model = theano.function([self.index], self.errors(self.y),\n # givens={\n # self.x: val_set_x[batch_start:batch_end],\n # self.y: val_set_y[batch_start:batch_end]},\n # allow_input_downcast=True)\n\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n 
allow_input_downcast=True)\n\n #FIXME: this is a bit weird\n test_y_pred = self.predict(val_set_x)\n make_preds = theano.function([self.x], test_y_pred, allow_input_downcast=True)\n\n test_error = T.mean(T.neq(test_y_pred, self.y))\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n\n # start training over mini-batches\n print 'training...' \n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n # test_loss = test_model(val_set_x, val_set_y)\n # test_perf = 1 - test_loss \n # predz = make_preds(val_set_x)\n # val_perf = FmesSemEval(predz, val_set_y, pos_ind, neg_ind)\n val_perf = 0\n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val avg fmes: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) \n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n # test_loss = test_wmodel(val_set_x, val_set_y)\n # test_perf = 1 - test_loss \n # predz = make_preds(val_set_x)\n # fmes = FmesSemEval(predz, val_set_y, pos_ind, neg_ind)\n # print predz\n # print test_set_y\n # print \"Test performance acc: %.3f | polar fmes:%.3f \" % (test_perf,fmes)\n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n return test_perf", "def train(network, training_dataloader, batch_size, epochs):\n trainer = gluon.Trainer(network.collect_params(), 'adam',\n {'learning_rate': 0.002})\n metric = mx.metric.Accuracy()\n \n for epoch in range(epochs):\n train_loss =0.\n for data,label in training_dataloader:\n \n# print (data.shape)\n# print (label.shape)\n with autograd.record():\n output = network(data)\n loss=mx.ndarray.softmax_cross_entropy(output,label)\n loss.backward()\n\n trainer.step(batch_size)\n train_loss += loss.mean().asscalar()\n metric.update(label, output)\n \n print (epoch , metric.get()[1]) \n training_accuracy = metric.get()[1]\n return network, training_accuracy", "def network_training(batch_size, cost, iterations, data_input, label_input, minimization_step, pred, sess,\n test_labels, test_vec, training_labels, training_vec):\n\n # Define list to keep track of the loss and accuracy\n all_costs = []\n all_test_costs = []\n all_accuracy = [0]\n all_test_accuracy = [0]\n\n # Set the range for looping to train the neural network\n for i in range(iterations + 1):\n # To prevent slicing will be out of range\n offset = int(i * batch_size % len(training_vec))\n\n # Epoch wise training data shuffling\n if offset + batch_size >= len(training_vec):\n training_vec, training_labels = shuffled_copies(training_vec, training_labels)\n test_vec, test_labels = shuffled_copies(test_vec, test_labels)\n\n batch_samples = training_vec[offset:offset + batch_size]\n batch_labels = training_labels[offset:offset + batch_size]\n\n # Calculate for 
every iteration the training and test loss\n _, current_cost = sess.run([minimization_step, cost],\n # feed_dict={data_input: training_vec,\n # label_input: training_labels})\n feed_dict={data_input: batch_samples,\n label_input: batch_labels})\n\n test_cost = sess.run(cost, feed_dict={data_input: test_vec,\n label_input: test_labels})\n\n # Get the accuracy for the first 100 iterations in step of 5, because this is a steep part in the graph\n if i in range(0, 101) and i % 5 == 0:\n accuracy_sess = get_accuracy(data_input, label_input, pred, sess, training_labels, training_vec,\n offset=offset, batch_size=batch_size)\n accuracy_sess_test = get_accuracy(data_input, label_input, pred, sess, test_labels, test_vec)\n\n # Thereafter get the accuracy for every 20th iteration\n if i in range(102, iterations + 1) and i % 20 == 0:\n accuracy_sess = get_accuracy(data_input, label_input, pred, sess, training_labels, training_vec,\n offset=offset, batch_size=batch_size)\n accuracy_sess_test = get_accuracy(data_input, label_input, pred, sess, test_labels, test_vec)\n\n # Check every 50th loop how well the prediction is\n if i % 50 == 0:\n print(\"accuracy : {0:.2f}%\".format(accuracy_sess))\n print(\"STEP: {} | Cost: {}\".format(i, current_cost))\n\n # Add the los and accuracy to the defined lists\n all_costs.append(current_cost)\n all_test_costs.append(test_cost)\n all_accuracy.append(accuracy_sess)\n all_test_accuracy.append(accuracy_sess_test)\n\n return all_accuracy, all_costs, all_test_accuracy, all_test_costs, test_labels, test_vec, training_labels, \\\n training_vec", "def _create_train_op(self):\n self.lr = self.learning_rate\n # global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.constant(value=self.learning_rate, shape=[], dtype=tf.float32)\n learning_rate =tf.train.exponential_decay(learning_rate,self.global_step,2*self.num_warm_up,0.96,staircase=True,name=\"exponential_decay\")\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if self.num_warm_up:\n global_steps_int = tf.cast(self.global_step, tf.int32)\n warmup_steps_int = tf.constant(self.num_warm_up, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = self.learning_rate * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n self.current_learning_rate = learning_rate\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.lr)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.lr)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif self.optim_type == \"bert\":\n self.optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,\n beta_2=0.999, epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n\n self.logger.info(\"applying optimize %s\" % self.optim_type)\n if self.clip_weight:\n # clip_weight\n tvars = tf.trainable_variables()\n grads = tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.max_norm_grad)\n grad_var_pairs = zip(grads, tvars)\n train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad', global_step=self.global_step)\n new_global_step = self.global_step + 1\n train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])\n self.train_op = train_op\n else:\n self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)", "def nn(data):\n training_set = SupervisedDataSet*\n\n\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n for phase in [\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] = self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] += batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n 
epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def train(self, data_iterator):\n \n if self.config['sequence_input']:\n if self.config['net_input_add_onehot']:\n input_data_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_input']))\n else:\n input_data_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_input']))\n \n if self.config['sequence_output']:\n if self.config['net_target_add_onehot']:\n target_ph = tf.placeholder(tf.uint8, shape=(self.config['batch_size'], self.config['timesteps']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n target_ph = tf.placeholder(tf.float32, shape=(self.config['batch_size'], self.config['num_output']))\n \n training, loss_avg_t = self.setup_train(input_data_ph, target_ph)\n \n session = tf.Session()\n session.run(tf.global_variables_initializer())\n \n self.analyze_config()\n \n for epoch in range(self.config['epochs']):\n starttime = time.time()\n for step in range(self.config['epoch_steps']):\n input_data, target = next(data_iterator)\n tmp, loss_avg_value = session.run([training, loss_avg_t], {input_data_ph:input_data, target_ph:target})\n print(\"Epoch: {} Loss: {} Elapsed:{}s\".format(epoch, loss_avg_value, (time.time() - starttime)))", "def train(self, inputs, targets, learningrate):\n\n i = np.array(np.asfarray(inputs), ndmin=2).T\n t = np.array(np.asfarray(targets), ndmin=2).T\n\n o = [i]\n for weight in self.weights:\n i = sigmoid(np.dot(weight, i))\n o.append(i)\n\n # Calculate errors\n e = [t - i]\n laste = t - i\n for index in range(len(self.weights) - 1):\n e.append(np.dot(self.weights[(len(self.weights) - 1) - index].T, laste))\n laste = np.dot(self.weights[(len(self.weights) - 1) - index].T, laste)\n\n # Adjust weights\n for index in range(len(self.weights)):\n self.weights[index] += learningrate * np.dot((e[len(e) - (index + 1)] * o[index + 1] * (1.0 - o[index + 1])), o[index].T)", "def _add_train_op(self):\n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n\n # Clip the gradients\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n grads, global_norm = tf.clip_by_global_norm(gradients, config.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n optimizer = tf.train.AdagradOptimizer(config.lr, initial_accumulator_value=config.adagrad_init_acc)\n #optimizer = tf.train.MomentumOptimizer(config.lr, momentum=0.01)\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def train(self, network, training_examples, learning_rate, \n reg_lambda=0, batch_size=1, passes=1):\n for _ in range(passes):\n random.shuffle(training_examples)\n for i, example in enumerate(training_examples):\n x = example[0]\n y = example[1]\n network.forward(x)\n network.backward(network.cost_gradient(y))\n if (i+1) % batch_size == 0:\n # update every batch_size examples\n network.update_parameters(learning_rate, batch_size, \n reg_lambda)\n network.reset_gradients()\n # update once more in case extra examples past last batch_update\n network.update_parameters(learning_rate, batch_size, reg_lambda)\n network.reset_gradients()", "def train_adam(total_loss, global_step):\n # Variables that affect learning rate.\n num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n #decay_steps = 2000\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n #opt = tf.train.GradientDescentOptimizer(lr)\n opt = tf.train.AdamOptimizer()\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, 
global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op", "def pcntrain(self,eta,nIterations,verbose=False): \n # Add the inputs that match the bias node\n inputs = np.concatenate((self.inputs,-np.ones((self.nData,1))),axis=1)\n targets = self.targets\n # Training\n change = range(self.nData)\n\n for n in range(nIterations):\n \n self.outputs = self._pcnfwd(inputs)\n self.weights += eta*np.dot(np.transpose(inputs),self.fd(self.outputs)*(targets-self.outputs))\n if verbose:\n print \"Iteration: \", n\n\t\tprint self.weights\n\t\t\t\n\t\tactivations = self._pcnfwd(inputs)\n\t\tprint \"Final outputs are:\"\n\t\tprint activations\n \n # Randomise order of inputs\n np.random.shuffle(change)\n inputs = inputs[change,:]\n targets = targets[change,:]", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. 
How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def train_classifier(train_data, dev_data, num_iterations, learning_rate, params):\n\n for I in xrange(num_iterations):\n cum_loss = 0.0 # total loss in this iteration.\n random.shuffle(train_data)\n for label, features in train_data:\n x = feats_to_vec(features) # convert features to a vector.\n y = utils.L2I[label] # convert the label to number if needed.\n loss, grads = ll.loss_and_gradients(x,y,params)\n cum_loss += loss\n # YOUR CODE HERE\n # update the parameters according to the gradients\n # and the learning rate.\n params[0] -= learning_rate * grads[0]\n params[1] -= learning_rate * grads[1]\n train_loss = cum_loss / len(train_data)\n train_accuracy = accuracy_on_dataset(train_data, params)\n dev_accuracy = accuracy_on_dataset(dev_data, params)\n print I, train_loss, train_accuracy, dev_accuracy\n return params", "def _train(self):\r\n lr, hr = self.sess.run(self.val_batch)\r\n res = self.sess.run(\r\n [self.train, self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: True\r\n })\r\n\r\n return res[1:]", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random 
seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def add_train_op(self, loss):\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.minimize(loss)", "def train(self):\n self.target_update_frequency = 10000 # How often to update target network\n\n stash_model = True # Flag for stashing a copy of the model\n model_stash_interval = 10 # Stashes a copy of the model this often\n\n # Number of steps to take before training. 
Allows buffer to partially fill.\n # Must be at least batch_size to avoid error when sampling from experience replay\n self.pre_training_steps = 10*self.batch_size\n assert(self.pre_training_steps <= self.buffer_size), \"Replay not large enough for pre-training!\"\n assert(self.pre_training_steps >= self.batch_size), \"Buffer not allowed to fill enough before sampling!\"\n # Number of steps to force learner to observe submitted actions, rather than submit its own actions\n self.observations = 2000\n\n self.epsilon = 0.5 # Initial probability of letting the learner submit its own action\n self.eps_decay_rate = 1./(25*20*len(self.training_data)) # Rate at which epsilon decays per submission\n\n lr_decay_freq = 10 # Decay learning rate after a set number of epochs\n min_learning_rate = 1.e-8 # Minimum learning rate allowed to decay to\n\n summaries = {}\n summaries[\"loss\"] = []\n summaries[\"train_acc\"] = []\n summaries[\"val_acc\"] = []\n # Load existing model\n self.ddq_net.sess.run(self.ddq_net.online_ops[\"init\"])\n if(self.load_path):\n self.ddq_net.load(self.load_path)\n print(\"\\nCheckpoint loaded from {}\".format(self.load_path))\n\n # Initialize target network\n self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_init\"])\n\n for self.epoch_count in range(self.n_epoch):\n t0 = time.time()\n learning_rate = self.ddq_net.online_ops[\"learning_rate\"].eval(self.ddq_net.sess)\n if((self.epoch_count>0) and (self.epoch_count % lr_decay_freq == 0) and (learning_rate>= min_learning_rate)):\n # Decay learning rate accoring to schedule\n learning_rate = 0.5*learning_rate\n self.ddq_net.sess.run(self.ddq_net.online_ops[\"learning_rate\"].assign(learning_rate))\n\n # Run single epoch of training\n loss, train_acc, val_acc = self.train_epoch()\n dt = time.time()-t0\n\n print(\" Finished epoch {:2}/{}: lr: {:.4e}, dt {:.2f}, loss {:.6f}, train {:.6f}, val {:.6f}\".format(self.epoch_count+1, self.n_epoch, learning_rate, dt, loss, train_acc, val_acc), flush=True)\n summaries[\"loss\"].append(loss)\n summaries[\"train_acc\"].append(train_acc)\n summaries[\"val_acc\"].append(val_acc)\n\n if(stash_model):\n if(self.epoch_count>0 and (self.epoch_count+1)%model_stash_interval==0):\n # Stash a copy of the current model\n out_path = \"tmp/models/{}_model_E{}.ckpt\".format(self.ddq_net._name, self.epoch_count+1)\n self.ddq_net.save(path=out_path)\n print(\"Stashed a copy of the current model in {}\".format(out_path))\n\n self.ddq_net.save(path=self.ddq_net._path_to_model)\n return summaries", "def train_net(net, data_loader, iter_num, optimizer, criterion, acc_loader):\n print \"+----+--------+----------+----------+---------+\"\n print \"| it | loss | time (s) | dev_loss | dev_acc |\"\n print \"+----+--------+----------+----------+---------+\"\n for i in range(iter_num):\n cum_loss = 0.0\n start_time = time.time()\n\n for _, (inputs, labels) in enumerate(data_loader, 0): # go over all examples\n inputs, labels = Variable(inputs), Variable(labels)\n optimizer.zero_grad()\n outputs = net(inputs) # compute output of net\n loss = criterion(outputs, labels) # compute loss\n loss.backward() # compute grads\n optimizer.step() # update parameters\n cum_loss += loss.data[0]\n acc, loss = accuracy_and_loss_on(net, acc_loader, criterion) # compute accuracy in each iteration\n print \"| %-2d | %1.4f | %8.5f | %f | %5.2f %% |\" % (\n i, cum_loss / len(data_loader), time.time() - start_time, loss, acc * 100)\n print \"+----+--------+----------+----------+---------+\"", "def funcs(dataset, network, 
batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, sparsity=0.02, beta=0.01, momentum=MOMENTUM):\n\n # symbolic variables \n X_batch = T.matrix()\n y_batch = T.matrix()\n\n layers = lasagne.layers.get_all_layers(network)\n num_layers = len(layers)\n\n code_layer = layers[num_layers/2]\n activations_2_layer = layers[num_layers/2 - 1]\n activations_1_layer = layers[num_layers/2 - 2]\n\n # code output \n code_output = lasagne.layers.get_output(code_layer, X_batch, deterministic=True)\n\n l = T.sub(1,code_output)\n ll = T.mul(code_output,l)\n L = T.mul(4,ll)\n L = L.mean()\n\n rho_hat = T.mean(code_output,axis=1)\n # L = T.sum(sparsity * T.log(sparsity/rho_hat) + (1 - sparsity) * T.log((1 - sparsity)/(1 - rho_hat)))\n\n # reg = 0.0001*lasagne.regularization.l2(network)\n # this is the cost of the network when fed throught the noisey network\n train_output = lasagne.layers.get_output(network, X_batch)\n cost = lasagne.objectives.mse(train_output, y_batch) \n cost = cost.mean() + beta * L\n\n all_params = lasagne.layers.get_all_params(network)\n updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate, momentum)\n\n \n\n # code and activation outputs\n \n activations_1_output = lasagne.layers.get_output(activations_1_layer, X_batch, deterministic=True)\n activations_2_output = lasagne.layers.get_output(activations_2_layer, X_batch, deterministic=True)\n\n train = theano.function(inputs=[X_batch, y_batch], outputs=cost, updates=updates, allow_input_downcast=True)\n code = theano.function(inputs=[X_batch], outputs=code_output, allow_input_downcast=True)\n activations_1 = theano.function(inputs=[X_batch], outputs=activations_1_output, allow_input_downcast=True)\n activations_2 = theano.function(inputs=[X_batch], outputs=activations_2_output, allow_input_downcast=True)\n\n return dict(\n train=train,\n code=code,\n activations_1=activations_1,\n activations_2=activations_2\n )", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def train(self):\n # For debugging\n torch.autograd.set_detect_anomaly(True)\n # Move variables to device if haven't done so\n self.user_features = self.move_to_cuda(self.user_features, self.device)\n self.item_features = self.move_to_cuda(self.item_features, self.device)\n self.labels = self.move_to_cuda(self.labels, self.device)\n self.model = self.model.to(self.device)\n # Pretrain\n if self.pretrain_bm > 0:\n self.pretrain_bm_net(self.pretrain_bm)\n if self.pretrain_nc > 0:\n self.pretrain_nc_net(self.pretrain_nc)\n # optimizers\n optims = MultipleOptimizer(torch.optim.Adam(self.model.bm_net.parameters(), lr=self.lr),\n torch.optim.Adam(self.model.nc_net.parameters(), lr=self.lr, weight_decay=self.weight_decay))\n # optims = torch.optim.Adam(self.model.parameters(), lr = self.lr)\n\n # criterion = nn.CrossEntropyLoss()\n criterion = F.nll_loss\n best_test_auc = 0.\n best_val_auc = 0.\n best_res = None\n cnt_wait = 0\n patience = 50\n # Training...\n for epoch in range(self.n_epochs):\n self.model.train()\n self.model.zero_grad()\n input_adj = self.adj.clone()\n input_adj = input_adj.to(self.device)\n nc_logits, modified_adj, bm_loss, nc_loss = 
self.model(input_adj, self.user_features, self.item_features, self.n_epochs, epoch)\n loss = nc_loss + bm_loss * self.alpha\n optims.zero_grad()\n loss.backward()\n # for name, params in self.model.named_parameters():\n # \tif params.requires_grad:\n # \t\tprint(f'{name}: requires grad')\n # \t\tprint(torch.sum(params.grad))\n optims.step()\n # Computation Graph\n # Validation\n self.model.eval()\n with torch.no_grad():\n # input_adj = self.adj.clone()\n # input_adj = input_adj.to(self.device)\n # nc_logits_eval_original, _ = self.model.nc_net(input_adj, self.user_features, self.item_features)\n # input_adj = self.adj.clone()\n # input_adj = input_adj.to(self.device)\n nc_logits_eval_modified, _, _, _ = self.model(input_adj, self.user_features, self.item_features, self.n_epochs, epoch)\n training_res = self.eval_node_cls(nc_logits[self.train_nid].detach(), self.labels[self.train_nid], self.n_classes)\n # res = self.eval_node_cls(nc_logits_eval_original[self.val_nid], self.labels[self.val_nid], self.n_classes)\n res_modified = self.eval_node_cls(nc_logits_eval_modified[self.val_nid], self.labels[self.val_nid], self.n_classes)\n if res_modified['auc'] > best_val_auc:\n cnt_wait = 0\n best_val_auc = res_modified['auc']\n # res_test = self.eval_node_cls(nc_logits_eval_original[self.test_nid], self.labels[self.test_nid], self.n_classes)\n res_test_modified = self.eval_node_cls(nc_logits_eval_modified[self.test_nid], self.labels[self.test_nid], self.n_classes)\n if res_test_modified['auc'] > best_test_auc:\n best_test_auc = res_test_modified['auc']\n best_res = res_test_modified\n self.logger.info('Eland Training, Epoch [{}/{}]: loss {:.4f}, train_auc: {:.4f}, val_auc {:.4f}, test_auc {:.4f}, test_ap {:.4f}'\n .format(epoch+1, self.n_epochs, loss.item(), training_res['auc'], res_modified['auc'], res_test_modified['auc'], res_test_modified['ap']))\n else:\n cnt_wait += 1\n self.logger.info('Eland Training, Epoch [{}/{}]: loss {:.4f}, train_auc: {:.4f}, val_auc {:.4f}'\n .format(epoch+1, self.n_epochs, loss.item(), training_res['auc'], res_modified['auc']))\n\n if cnt_wait >= patience:\n self.logger.info('Early stop!')\n break\n self.logger.info('Best Test Results: auc {:.4f}, ap {:.4f}'.format(best_res['auc'], best_res['ap']))\n\n return best_res['auc'], best_res['ap']", "def train(self, x, y, epochs=1, verbose=False):\n numTrainingPoints = len(x)\n for e in range(epochs):\n # Set accuracy at beginning of epochs to 0s\n accuracy = 0\n # Compute the output for all training points\n allOutputs = self.getOutputNoActivation(x)\n for i in range(numTrainingPoints):\n # Increment iterations for learning rate scheduling\n self.iterations += 1\n # Calculate the new learning rate from scheduling\n lr = self.iterations ** -1\n # Grab the input for the specific training point\n trainingPointInputs = x[i]\n # Grab the output for the specific training point\n trainingPointOutput = allOutputs[i]\n # Get the target outputs for the specific training point\n targets = y[i]\n # Compare each output 1 by 1\n for outputIndex in range(len(trainingPointOutput)):\n # Grab specific output and corresponding target value\n targetVal = targets[outputIndex]\n outputVal = trainingPointOutput[outputIndex]\n # If the outputs match, increment accuracy\n if targetVal == outputVal:\n accuracy += 1\n continue\n # Else, update the weights\n else:\n # For each input weight, compute its delta change, and then apply the change\n for inputWeightIndex in range(len(self.weights[outputIndex])):\n # If the inputWeightIndex is in the range of 
values for inputs, use the input at that index\n if inputWeightIndex < len(trainingPointInputs):\n trainingPointInput = trainingPointInputs[inputWeightIndex]\n # Else, that value is the bias, and the input should be constant 1.0\n else:\n trainingPointInput = 1.0\n # Compute delta w and apply the change\n inputNorm = 0\n for tpi in trainingPointInputs:\n inputNorm += tpi ** 2\n inputNorm = math.sqrt(inputNorm)\n deltaW = lr * (targetVal - outputVal) * trainingPointInput / inputNorm**2\n self.weights[outputIndex, inputWeightIndex] += deltaW\n # Compute accuracy\n accuracy /= numTrainingPoints \n # If verbose == True, print accuuracy for each training epoch\n if verbose:\n print('Epoch ' + str(e+1) + ' / ' + str(epochs) + ' Accuracy: ' + str(accuracy))\n # Return final accuracy\n return accuracy", "def train(self, training, epochs, group):\n for epoch in range(epochs):\n self.input_matrix={}\n self.back_propagation_learning(training)\n acc = accuracy(self, group)\n print(\"Accuracy on epoch {} is {} \".format(epoch, acc))" ]
[ "0.71472293", "0.70600724", "0.70026565", "0.69949657", "0.6988527", "0.6967045", "0.6955149", "0.6940348", "0.694033", "0.68917704", "0.6864978", "0.68499064", "0.6846929", "0.6829985", "0.6814839", "0.68050236", "0.68016535", "0.6799331", "0.6787858", "0.67825234", "0.67695075", "0.6761829", "0.6754311", "0.67535496", "0.6743268", "0.6738554", "0.6724076", "0.67221", "0.6720773", "0.67168605", "0.67161846", "0.6714409", "0.6704055", "0.6698635", "0.66971487", "0.6689208", "0.6676618", "0.6670612", "0.66677004", "0.6667208", "0.66663635", "0.6665685", "0.66653603", "0.66518813", "0.66509944", "0.66431403", "0.6634126", "0.6612325", "0.66044676", "0.66006106", "0.6599085", "0.65806323", "0.6570246", "0.65565217", "0.65510374", "0.6545374", "0.65410376", "0.6539517", "0.6539517", "0.65378934", "0.6535404", "0.65155923", "0.65105283", "0.6507242", "0.65061677", "0.6503926", "0.65002817", "0.64949584", "0.64909035", "0.64855826", "0.64794576", "0.64735955", "0.64729565", "0.6471305", "0.64687806", "0.64553326", "0.6450829", "0.64499354", "0.64452165", "0.6444316", "0.64409304", "0.6440783", "0.6432659", "0.64291203", "0.6425261", "0.6423387", "0.6421484", "0.64157933", "0.64157265", "0.64137566", "0.6411952", "0.6403495", "0.639905", "0.6396042", "0.63939303", "0.6391619", "0.638872", "0.638754", "0.63860637", "0.63847864", "0.6384609" ]
0.0
-1
Return the accuracy attained by the neural network on the test set
def score(self, test_data):\n    ins, outs = self._split_inputs_outputs(test_data)\n    # One hot encode the input/labels\n    encoder = LabelEncoder()\n    encoder.fit(outs)\n    enc_labels = encoder.transform(outs)\n    enc_labels = np_utils.to_categorical(enc_labels)\n    _, score = self.model.evaluate(ins, enc_labels, verbose=2)\n    return score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n 
print '#' * int(test_accuracy), 'TE'", "def accuracy(self):", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def accuracy(predictions, targets):\n return accuracy", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N", "def accuracy(pred, target):\n N = pred.shape[0]\n return (pred == target).sum() * 1.0 / N", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def accuracy(self):\n\t\treturn self.accuracy_", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def findOveralAccuracy(trainData,testData):\r\n kNNClassifier = kNN(trainData)\r\n \r\n All_Predictions = kNNClassifier.classify(testData,k=5)\r\n \r\n reference_dictionary = testData.dataDict['Species']\r\n\r\n Overall_Accuracy = 100*sum(reference_dictionary== All_Predictions)/len(All_Predictions)\r\n \r\n return All_Predictions, Overall_Accuracy", "def accuracy(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n return (y_true == y_pred).mean()", "def accuracy(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return (conf_matrix[0][0]+conf_matrix[1][1])/(conf_matrix[0][0] + conf_matrix[0][1]\\\n + conf_matrix[1][0] + conf_matrix[1][1])", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = 
utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def accuracy(output, target): # , topk=(1,)):\n correct = 0\n batch_size = target.size(0)\n for i in range(batch_size):\n tar = target[i].data.cpu().numpy()\n pred = output[i].data.cpu().numpy()\n if (tar) == np.argmax(pred):\n correct += 1\n return float(correct/batch_size)", "def get_test_accuracy(model, X_test, y_test):\n # Make predictions - test accuracy\n test_pred = model.predict(X_test)\n score = accuracy_score(test_pred, y_test)\n print(\"Test Accuracy:\", score)\n\n return test_pred", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def getaccuracy(features: ndarray, target: ndarray, trained_model) -> float:\n predictions = trained_model.predict(features)\n\n accuracy = accuracy_score(target, predictions, normalize=True)\n\n return accuracy", "def accuracy(self, X_train, y_train):\n y_train_pred = self.predict(X_train)\n diffs = y_train_pred - y_train\n count = 0.\n for i in range(y_train.shape[0]):\n if diffs[i] != 0:\n count+=1\n return 100 - count*100/y_train.shape[0]", "def accuracy(predictions, targets):\n\n compare = predictions == targets\n # compare = (predictions.argmax(dim=1)) == (targets)\n # compare = (predictions.argmax(dim=1)) == (targets.argmax(dim=1))\n # summed = compare.sum().item()\n summed = compare.sum()\n # print(summed, compare.size())\n # print(compare.size()[0])\n return summed/compare.size", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def evaluate_accuracy(net, data_iter): #@save\n metric = Accumulator(2) # No. of correct predictions, no. of predictions\n for _, (X, y) in enumerate(data_iter):\n metric.add(accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]", "def evaluate_accuracy(net, data_iter): #@save\n if isinstance(net, torch.nn.Module):\n net.eval() # Set the model to evaluation mode\n metric = Accumulator(2) # No. of correct predictions, no. 
of predictions\n for X, y in data_iter:\n metric.add(accuracy(net(X), y), len(y))\n return metric[0] / metric[1]", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n i1 = np.arange(0, len(targets), 1)\n i2 = np.argmax(predictions, axis = 1)\n accuracy = targets[i1, i2].sum()/targets.sum()\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def accuracy(net,dl):\r\n n_correct = 0\r\n n_total = 0\r\n net.eval()\r\n with torch.no_grad():\r\n for x_batch,y_batch in dl:\r\n pred = net(x_batch)\r\n pred = torch.argmax(pred,dim=1)\r\n n_correct += (pred == y_batch).float().sum().numpy()\r\n n_total += x_batch.size(0)\r\n\r\n return n_correct/n_total", "def accuracy(gt, pred):\n \n return np.mean(gt == pred)", "def accuracy(output, target):\n correct = target.eq(torch.round(output))\n correct = correct.float()*100.0\n correct = torch.mean(correct, 0)\n res = torch.mean(correct)\n\n return res,correct", "def accuracy(predicted, ground_truth):\n predicted_labels_decoded = np.argmax(predicted, axis=1)\n ground_truth_labels_decoded = np.argmax(ground_truth, axis=1)\n correct_rate = [1 if pred == truth else 0 for (pred, truth) in\n zip(predicted_labels_decoded, ground_truth_labels_decoded)]\n accuracy = sum(correct_rate) / ground_truth_labels_decoded.size\n return accuracy * 100", "def accuracy(links_true, links_pred=None, total=None):\n\n if isinstance(total, pandas.MultiIndex):\n total = len(total)\n\n if _isconfusionmatrix(links_true):\n confusion_matrix = links_true\n\n v = (confusion_matrix[0, 0] + confusion_matrix[1, 1]) / numpy.sum(\n confusion_matrix\n )\n else:\n tp = true_positives(links_true, links_pred)\n tn = true_negatives(links_true, links_pred, total)\n\n v = (tp + tn) / total\n\n return float(v)", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def accuracy(labels, predictions, n_classes):\n\t\tequality = tf.equal(x = predictions, y = labels) # match the type of labels\n\t\treturn tf.reduce_mean(tf.cast(equality, tf.float32))", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def test_accuracy(y, tx, w):\n labels = predict_regression_labels(w, tx)\n \n return (labels==y).sum()/len(y)", "def accuracy(self):\r\n return self._feature.attributes[self._schema.accuracy]", "def get_accuracy(self, predicted_y, actual_y, log_tests=False):\n if log_tests:\n for i in range(actual_y.shape[0]):\n print 'predicted = {0}, actual = {1}'.format(predicted_y[i], actual_y[i])\n return float(sum(predicted_y == actual_y)) / predicted_y.shape[0]", "def accuracy(actual, predicted):\n return np.sum(predicted == actual) / actual.shape[0]", "def accuracy(self, data, convert=False):\n if convert:\n results = [(np.argmax(self.feedforward(x)), np.argmax(y))\n for (x, y) in data]\n else:\n results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in data]\n return sum(int(x == y) for (x, y) in results)", "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in 
ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def accuracy(outputs, labels):\n predicted = outputs.argmax(dim=1)\n correct = (predicted == labels).sum().item()\n return correct / labels.size(0)", "def accuracy(output, target):\n with torch.no_grad():\n batch_size = target.size(0)\n pred = torch.argmax(output, dim=1)\n correct = pred.eq(target)\n num_correct = correct.float().sum(0)\n return num_correct.mul_(100.0 / batch_size)", "def accuracy_info(self):\n return self.bayes_accuracy", "def multiclass_accuracy(prediction, ground_truth):\n correct = sum(a == b for a, b in zip(prediction, ground_truth))\n\n accuracy = correct / len(ground_truth)\n\n return accuracy", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def accuracy1(y_test, predictions):\n accuracy = 0.0\n\n for i in range(y_test.shape[0]):\n intersection = 0.0\n union = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) == 1 or int(predictions[i,j]) == 1:\n union += 1\n if int(y_test[i,j]) == 1 and int(predictions[i,j]) == 1:\n intersection += 1\n \n if union != 0:\n accuracy = accuracy + float(intersection/union)\n\n accuracy = float(accuracy/y_test.shape[0])\n\n return accuracy", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]", "def accuracy_fn(y_true, y_pred):\n correct = torch.eq(y_true, y_pred).sum().item()\n acc = (correct / len(y_pred)) * 100\n return acc", "def get_accuracy(pred, test_label, regression= \"logistic\"):\n if regression == \"multiclass\":\n pred_max = np.argmax(pred, axis=1)\n gt_max = np.argmax(test_label, axis=1)\n acc = np.sum(pred_max == gt_max)*100.0/pred.shape[0]\n elif regression == \"logistic\" or regression == \"probit\":\n if pred.ndim == 2:\n pred = pred[:,0]\n pred[pred >= 0.5] = 1.0\n pred[pred < 0.5] = 0.0\n acc = np.sum(pred == test_label)*100.0/pred.shape[0]\n\n return acc", "def _accuracy(net, data_loader):\n net.train(False)\n\n num_correct = 0\n num_total = 0\n for X, y in data_loader:\n # Data.\n X = torch.autograd.Variable(X.cuda())\n y = torch.autograd.Variable(y.cuda())\n X.requires_grad = True\n # Prediction.\n score = net(X)\n _, prediction = torch.max(score.data, 1)\n num_total += y.size(0)\n num_correct += torch.sum(prediction == y.data).item()\n net.train(True) # Set the model to training phase\n return 100 * num_correct / num_total", "def accuracy_score(y_true, y_pred):\n\ttp, fn, fp, tn = confusion_matrix(y_true, y_pred, table_show=False)\n\n\treturn (tp+tn) / (tp+tn+fn+fp)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def reportAccuracy(self, testLabels=\"\"):\n assert 
len(self._predictions) > 0\n rawTestLabelDump = self._read_file(testLabels)\n formattedTestLabels = [line for line in rawTestLabelDump.split('\\n')]\n corrects = [1 for x in zip(self._predictions, formattedTestLabels) if x[0] == x[1]]\n return (len(corrects) / len(self._predictions)) * 100", "def accuracy(y_true, y_pred):\r\n\r\n cm = confusion_matrix(y_true=y_true, y_pred=y_pred)\r\n cost_m = np.max(cm) - cm\r\n indices = linear_sum_assignment(cost_m)\r\n indices = np.asarray(indices)\r\n indexes = np.transpose(indices)\r\n total = 0\r\n for row, column in indexes:\r\n value = cm[row][column]\r\n total += value\r\n return total * 1. / np.sum(cm)", "def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs == labels)/float(labels.size)", "def accuracy(outputs, labels):\r\n outputs = np.argmax(outputs, axis=1)\r\n return np.sum(outputs == labels) / float(labels.size)", "def get_accuracy(model, task, batchmanager, test_set=False):\n\n model.eval()\n count, num = 0., 0\n batchmanager = batchmanager if isinstance(batchmanager, BatchManager) else batchmanager.batchmanagers[task]\n\n iter = batchmanager.test_iter if test_set else batchmanager.dev_iter\n\n with torch.no_grad():\n for batch in iter: \n data, targets = batch\n out = model(data, task)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def accuracy(self, y, x):\n prediction = self.predict(x)\n return np.mean(prediction == y)", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def accuracy(output, target, topk=(1,)):\n corrrect_ks = correct(output, target, topk)\n batch_size = target.size(0)\n return [correct_k.float().mul_(100.0 / batch_size) for correct_k in corrrect_ks]", "def get_accuracy(pos_test, neg_test, pos_train, neg_train):\n pos_file = open(pos_test, \"r\")\n neg_file = open(neg_test, \"r\")\n trained_pos = train_model(pos_train)\n trained_neg = train_model(neg_train)\n pos_count = 0\n #keeps track of how many positive reviews are accurately predicted\n total_pos_reviews = 0 \n neg_count = 0\n #keeps track of how many negative reviews are accurately predicted\n total_neg_reviews = 0\n for review in pos_file:\n classification = classify(review, trained_pos, trained_neg)\n total_pos_reviews += 1\n if classification == \"positive\":\n pos_count += 1 \n positive_accuracy = pos_count/total_pos_reviews \n for review in neg_file:\n classification = classify(review, trained_pos, trained_neg)\n total_neg_reviews += 1\n if classification == \"negative\":\n neg_count += 1 \n negative_accuracy = neg_count/total_neg_reviews \n total_accuracy = average(positive_accuracy, negative_accuracy)\n print(\"Positive accuracy: \" + str(positive_accuracy))\n print(\"Negative accuracy: \" + str(negative_accuracy))\n print(\"Total accuracy: \" + str(total_accuracy))", "def accuracy_score(y_test, predictions):\n # my implementation\n # correct_prediction = y_test == predictions\n # accuracy_score = (predictions[correct_prediction].shape[0] /\n # (predictions.shape[0] * 1.0))\n\n # better implementation\n if len(y_test) != len(predictions):\n raise ValueError('y_test and predictions are in different shape')\n\n return (y_test == predictions).mean()", "def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)", 
"def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)", "def test_accuracy(self, _input_data, _labels, quiet=False):\n test_loss, test_accuracy = (self.merged_model).evaluate(\n _input_data, _labels, verbose=0\n )\n\n return test_accuracy", "def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs == labels) / float(labels.size)", "def accuracy(outputs, targets):\n\n batch_size = targets.size(0)\n\n _, pred = torch.max(outputs.data, 1)\n correct = (pred == targets).sum().item()\n\n res = 100 * correct / batch_size\n return res", "def report_accuracy(\r\n predictions: pd.DataFrame, \r\n test_y: pd.DataFrame\r\n) -> None:\r\n # Calculate accuracy of predictions\r\n accuracy = (predictions == test_y).mean()\r\n \r\n # Log the accuracy of the model\r\n log = logging.getLogger(__name__)\r\n log.info(\"Model accuracy on test set: %0.2f%%\", accuracy * 100)\r\n\r\n mlflow.log_metric(\"accuracy\", accuracy)\r\n mlflow.set_tag(\"Model Version\", 1)", "def calc_accuracy(true, predicted):\n return sum([t==p for t,p in zip(true, predicted)]) / float(len(true))", "def accuracy(pred, labels):\n pred = torch.sigmoid(pred)\n predicted = (pred > 0.5).int()\n correct = (predicted == labels).sum().item()\n return correct / labels.shape[0]", "def accuracy(y_pred, y_true, mask=None, percentage=True):\n acc = metrics.accuracy_score(y_true, y_pred, normalize=True, sample_weight=mask)\n if percentage:\n acc *= 100\n return acc", "def compute_accuracy(Y_test, Y_pred):\n number_correct_prediction = 0\n for i in range(len(Y_pred)): # They have the same length\n id_pred = np.argmax(Y_pred[i]) # Take the argmax of the prediction\n id_test = np.where(Y_test[i] == 1.)[0][0] # Take the real position of the POS tag\n if id_test == id_pred:\n number_correct_prediction += 1\n\n percentage_correct = number_correct_prediction / len(Y_pred)\n\n return percentage_correct", "def accuracy_score(preds, y):\n accuracy = sum([1 for i in range (len(preds)) if preds[i] == y[i]])*1.0/len(preds) \n return accuracy", "def calculate_accuracy(network, dataloader):\n accuracy = metric.Accuracy()\n for data, labels in tqdm(dataloader):\n preds = network(data)\n accuracy.update(labels = labels,preds = preds)\n return accuracy", "def calculate_accuracy(network, dataloader):\n accuracy = metric.Accuracy()\n for data, labels in tqdm(dataloader):\n preds = network(data)\n accuracy.update(labels = labels,preds = preds)\n return accuracy", "def evaluate_accuracy(data_iter, net, ctx=[mx.cpu()]):\n acc = nd.array([0], ctx=ctx)\n n = 0\n for X, y in data_iter:\n X = X.as_in_context(ctx)\n y = y.astype('float32').as_in_context(ctx)\n acc += (net(X).argmax(axis=1) == y).sum()\n n += y.size\n return acc.asscalar() / n", "def accuracy(self, X, Y, y):\n P, _ = self.forward(X)\n out = np.argmax(P, axis=0).reshape(-1,1)\n return np.mean(np.where(y==out, 0, 1))", "def accuracy(probabilities: np.ndarray, labels: np.ndarray) -> float:\n # [batch_size]\n predictions = probabilities.argmax(axis=1)\n # [batch_size]\n labels = labels.argmax(axis=1)\n return (predictions == labels).astype(int).mean()", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n tf.summary.scalar('test_accuracy', accu)\n return accu", "def accuracy(self, predictions, truth):\n return np.mean(np.argmax(predictions, axis=1) == truth) # <COGLINE>" ]
[ "0.8605905", "0.815202", "0.8070769", "0.8068605", "0.8035914", "0.80330867", "0.7938776", "0.7906472", "0.7895872", "0.78683805", "0.78303057", "0.77969366", "0.7765335", "0.7745827", "0.7736295", "0.77029103", "0.7656342", "0.76442033", "0.7615893", "0.7611401", "0.76058733", "0.75958925", "0.75873697", "0.7559909", "0.7541275", "0.7532912", "0.7531111", "0.7523292", "0.7506158", "0.7502956", "0.749617", "0.7483254", "0.7483254", "0.7459124", "0.7449514", "0.74178606", "0.73787946", "0.73594266", "0.73576915", "0.73555857", "0.735433", "0.7343087", "0.73398143", "0.73385924", "0.73219186", "0.7321775", "0.73040277", "0.726778", "0.7256137", "0.7254934", "0.7245473", "0.72403705", "0.7234701", "0.7224946", "0.7217949", "0.72081065", "0.720375", "0.7176767", "0.7176238", "0.71756065", "0.717383", "0.7168957", "0.7165292", "0.7158774", "0.71512645", "0.714828", "0.7120854", "0.710837", "0.71037436", "0.71007615", "0.7096387", "0.70911515", "0.70911515", "0.70877546", "0.70825315", "0.70804346", "0.70754933", "0.7075158", "0.7072876", "0.70698", "0.70698", "0.70635885", "0.7061103", "0.7059481", "0.7059481", "0.7055172", "0.7051253", "0.70495284", "0.7048558", "0.7045832", "0.70437175", "0.703891", "0.7037687", "0.7028267", "0.7021432", "0.7021432", "0.7017909", "0.7015009", "0.7006985", "0.69925576", "0.69884753" ]
0.0
-1
Python model creator for tensor implementation in Java. Args
def __init__(self, dim=20, nIter=5, lamb=0.05, alph=40, user_features=["user"], item_features=["item"]):\n    self.setParams(dim, nIter, lamb, alph)\n    self.user_features = {}\n    self.item_features = {}\n    self.factors = {}\n    self.user_column_names = user_features\n    self.item_column_names = item_features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def _make_model_v1():\n tf.reset_default_graph()\n\n with tf.Session() as sess:\n # Make two simple graphs, both of which will be served by TF\n x = tf.placeholder('float', shape=(None, 3), name='Input')\n z = tf.placeholder('float', shape=(), name='ScalarMultiple')\n m = tf.Variable([1.0, 1.0, 1.0], name='Slopes')\n y = m * x + 1\n len_fun = tf.reduce_sum(y - x) # Returns the number of elements in the array\n scale_mult = tf.multiply(z, x, name='scale_mult')\n\n # Initialize the variables\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Create the tool for saving the model to disk\n builder = tf.saved_model.builder.SavedModelBuilder(tf_export_path)\n\n # Make descriptions for the inputs and outputs\n x_desc = tf.saved_model.utils.build_tensor_info(x)\n y_desc = tf.saved_model.utils.build_tensor_info(y)\n z_desc = tf.saved_model.utils.build_tensor_info(z)\n len_fun_desc = tf.saved_model.utils.build_tensor_info(len_fun)\n scale_mult_desc = tf.saved_model.utils.build_tensor_info(scale_mult)\n\n # Make a signature for the functions to be served\n func_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc},\n outputs={'y': y_desc},\n method_name='run'\n )\n len_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc},\n outputs={'len': len_fun_desc},\n method_name='length'\n )\n mult_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc, 'z': z_desc},\n outputs={'scale_mult': scale_mult_desc},\n method_name='scalar_multiply'\n )\n\n # Add the functions and the state of the graph to the builder\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: func_sig,\n 'length': len_sig,\n 'scalar_multiply': mult_sig\n })\n\n # Save the function\n builder.save()", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def cli(sys_argv: List[str]):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_definition', type=str,\n help='Path to json model definition')\n\n parser.add_argument('--model_state_path', type=str,\n help='Path where to the trained 
parameters')\n\n parser.add_argument('--data_path', type=str, default=TEST_PATH,\n help='path to the pickled dataframe on which prediction should be made')\n\n parser.add_argument('--numerical_preprocessor', type=str, default=NUMERICAL_PREPROCESSOR_SAVE_PATH,\n help='Path of the saved numerical preprocessor')\n\n parser.add_argument('--categorical_preprocessor', type=str, default=CATEGORICAL_PREPROCESSOR_SAVE_PATH,\n help='Path to the saved categorical preprocessor')\n\n parser.add_argument('--output_directory', type=str, default=RESULTS_DIR,\n help='Path where to save the prediction of the experiment')\n\n args = parser.parse_args(sys_argv)\n\n # # ---------- parse config file ---------- # #\n config: dict = json.load(open(args.model_definition, 'r'))\n\n model_class: str = config['model_class']\n model_name: str = config['model_name']\n numerical_input_features: List[str] = config['data']['numerical_input_features']\n categorical_input_features: List[str] = config['data']['categorical_input_features']\n output_features: List[str] = config['data']['output_features']\n batch_size_test: int = config['data']['batch_size_test']\n\n device = torch.device(CUDA if torch.cuda.is_available() else CPU)\n\n # # ---------- parse model state ---------- # #\n model_state = load_model_state(args.model_state_path, device)\n\n model_hyperparameters: dict = model_state['hyperparameters']\n model_hyperparameters.update(config['model'])\n model_hyperparameters['device']: torch.device = device\n model_weights: dict = model_state['best_model_state_dict']\n\n # # ---------- initialize model ---------- # #\n model = REGISTERED_MODELS[model_class](**model_hyperparameters).to(device)\n model.load(model_weights)\n\n # # ---------- preprocess data for inference ---------- # #\n test_loader = preprocess_for_inference(\n args.data_path,\n numerical_input_features,\n categorical_input_features,\n output_features,\n args.numerical_preprocessor,\n args.categorical_preprocessor,\n batch_size_test=batch_size_test\n )\n\n # # ---------- compute and save predictions ---------- # #\n predictions = model.predict(test_loader)\n\n # save predictions\n data_file_name = os.path.basename(args.data_path)\n data_file_name = os.path.splitext(data_file_name)[0] # remove extension\n model_path = '{}/predictions_{}_{}.pickle'.format(args.output_directory, model_name, data_file_name)\n print(' [predict] Saving predictions at: `{}`'.format(model_path))\n file_utils.save_to_pickle(\n predictions,\n path=model_path\n )\n print(' [predict] Done')", "def build_model_fn(self):", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def model_creator(config):\n return nn.Linear(1, 1)", "def model_create(ARGS):\n\n def retain(ARGS):\n \"\"\"Create the model\"\"\"\n\n # Define the constant for model saving\n reshape_size = ARGS.emb_size + ARGS.numeric_size\n if ARGS.allow_negative:\n embeddings_constraint = FreezePadding()\n beta_activation = 'tanh'\n output_constraint = None\n else:\n embeddings_constraint = FreezePadding_Non_Negative()\n beta_activation = 'sigmoid'\n output_constraint = non_neg()\n\n def reshape(data):\n \"\"\"Reshape the context vectors to 3D vector\"\"\"\n return K.reshape(x=data, 
shape=(K.shape(data)[0], 1, reshape_size))\n\n # Code Input\n codes = L.Input((None, None), name='codes_input')\n inputs_list = [codes]\n # Calculate embedding for each code and sum them to a visit level\n codes_embs_total = L.Embedding(ARGS.num_codes + 1,\n ARGS.emb_size,\n name='embedding'\n # BUG: embeddings_constraint not supported\n # https://github.com/tensorflow/tensorflow/issues/33755\n # ,embeddings_constraint=embeddings_constraint\n )(codes)\n codes_embs = L.Lambda(lambda x: K.sum(x, axis=2))(codes_embs_total)\n # Numeric input if needed\n if ARGS.numeric_size > 0:\n numerics = L.Input((None, ARGS.numeric_size), name='numeric_input')\n inputs_list.append(numerics)\n full_embs = L.concatenate([codes_embs, numerics], name='catInp')\n else:\n full_embs = codes_embs\n\n # Apply dropout on inputs\n full_embs = L.Dropout(ARGS.dropout_input)(full_embs)\n\n # Time input if needed\n if ARGS.use_time:\n time = L.Input((None, 1), name='time_input')\n inputs_list.append(time)\n time_embs = L.concatenate([full_embs, time], name='catInp2')\n else:\n time_embs = full_embs\n\n # Setup Layers\n # This implementation uses Bidirectional LSTM instead of reverse order\n # (see https://github.com/mp2893/retain/issues/3 for more details)\n\n # If training on GPU and Tensorflow use CuDNNLSTM for much faster training\n if glist:\n alpha = L.Bidirectional(L.CuDNNLSTM(ARGS.recurrent_size, return_sequences=True),\n name='alpha')\n beta = L.Bidirectional(L.CuDNNLSTM(ARGS.recurrent_size, return_sequences=True),\n name='beta')\n else:\n alpha = L.Bidirectional(L.LSTM(ARGS.recurrent_size,\n return_sequences=True, implementation=2),\n name='alpha')\n beta = L.Bidirectional(L.LSTM(ARGS.recurrent_size,\n return_sequences=True, implementation=2),\n name='beta')\n\n alpha_dense = L.Dense(1, kernel_regularizer=l2(ARGS.l2))\n beta_dense = L.Dense(ARGS.emb_size + ARGS.numeric_size,\n activation=beta_activation, kernel_regularizer=l2(ARGS.l2))\n\n # Compute alpha, visit attention\n alpha_out = alpha(time_embs)\n alpha_out = L.TimeDistributed(alpha_dense, name='alpha_dense_0')(alpha_out)\n alpha_out = L.Softmax(axis=1)(alpha_out)\n # Compute beta, codes attention\n beta_out = beta(time_embs)\n beta_out = L.TimeDistributed(beta_dense, name='beta_dense_0')(beta_out)\n # Compute context vector based on attentions and embeddings\n c_t = L.Multiply()([alpha_out, beta_out, full_embs])\n c_t = L.Lambda(lambda x: K.sum(x, axis=1))(c_t)\n # Reshape to 3d vector for consistency between Many to Many and Many to One implementations\n contexts = L.Lambda(reshape)(c_t)\n\n # Make a prediction\n contexts = L.Dropout(ARGS.dropout_context)(contexts)\n output_layer = L.Dense(1, activation='sigmoid', name='dOut',\n kernel_regularizer=l2(ARGS.l2), kernel_constraint=output_constraint)\n\n # TimeDistributed is used for consistency\n # between Many to Many and Many to One implementations\n output = L.TimeDistributed(output_layer, name='time_distributed_out')(contexts)\n # Define the model with appropriate inputs\n model = Model(inputs=inputs_list, outputs=[output])\n\n return model\n\n # Set Tensorflow to grow GPU memory consumption instead of grabbing all of it at once\n # If there are multiple GPUs set up a multi-gpu model\n # Get available gpus , returns empty list if none\n # glist = get_available_gpus()\n glist = []\n if len(glist) > 1:\n with tf.device('/cpu:0'):\n model = retain(ARGS)\n model_final = make_parallel(model, glist)\n else:\n model_final = retain(ARGS)\n\n # Compile the model - adamax has produced best results in our 
experiments\n model_final.compile(optimizer='adamax',\n loss='binary_crossentropy',\n #TODO: add AUPRC?\n metrics=[Recall(), specificity,\n SpecificityAtSensitivity(0.5,3),\n SensitivityAtSpecificity(0.5, 3),\n 'accuracy', AUC(), Precision()],\n sample_weight_mode=\"temporal\")\n return model_final", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if 
args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def __init__(self, model_name: str, label_file: str) -> None:\n\n # Append TFLITE extension to model_name if there's no extension\n _, ext = os.path.splitext(model_name)\n if not ext:\n model_name += '.tflite'\n\n # Initialize the TFLite model.\n interpreter = Interpreter(model_path=model_name, num_threads=4)\n interpreter.allocate_tensors()\n\n self._input_index = interpreter.get_input_details()[0]['index']\n self._output_index = interpreter.get_output_details()[0]['index']\n self._interpreter = interpreter\n\n self.pose_class_names = self._load_labels(label_file)", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = 
torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in predicted_idx]))\n else:\n _evaluate(indexes, encoder, decoder, target_vocab, checkpoint,\n dataset_params)", "def _make_model_v2():\n class CustomModule(tf.Module):\n\n def __init__(self):\n super().__init__()\n self.m = tf.Variable([1.0, 1.0, 1.0], name='slope')\n\n @tf.function\n def __call__(self, x):\n y = self.m * x + 1\n return y\n\n @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])\n def length(self, x):\n return tf.reduce_sum(self(x) - x, name='length')\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32),\n tf.TensorSpec((None, 3), tf.float32)])\n def scalar_multiply(self, z, x):\n return tf.multiply(z, x, name='scale_mult')\n\n module = CustomModule()\n\n # Make a concrete version of __call__\n call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))\n\n tf.saved_model.save(\n module, tf_export_path, signatures={\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,\n 'length': module.length,\n 'scalar_multiply': module.scalar_multiply\n }\n )", "def create_model(args, pretrained_embedding: np.ndarray, model_name='deep_q_network', trainable=True):\n\n state = Input(shape=(args.n_features,))\n model = None\n\n n, m = pretrained_embedding.shape\n print('shape', pretrained_embedding.shape)\n 
embedded = Embedding(n, m, embeddings_initializer=keras.initializers.constant(pretrained_embedding))(state)\n\n if model_name == \"deep_q_network\":\n print(\"Building \" + model_name + \" ...\")\n\n # First convolutional layer\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(inputs=state, outputs=y_pred)\n\n elif model_name == \"deep_q_network_double\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(input=state, output=y_pred)\n\n elif model_name == \"deep_q_network_duel\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n # value output\n x_val = Dense(args.hidden_size, trainable=trainable)(x)\n # x_val = Activation('relu')(x_val)\n y_val = Dense(1, trainable=trainable)(x_val)\n\n # advantage output\n x_advantage = Dense(args.hidden_size, trainable=trainable)(x)\n # x_advantage = Activation('relu')(x_advantage)\n y_advantage = Dense(args.n_actions, trainable=trainable)(x_advantage)\n # mean advantage\n y_advantage_mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(y_advantage)\n\n y_q = Lambda(lambda x: x[0] + x[1] - x[2])([y_val, y_advantage, y_advantage_mean])\n\n model = Model(input=state, output=y_q)\n\n else:\n print(\"Model not supported\")\n exit(1)\n\n return model", "def build_model():", "def tensor(*args, **kwargs):\n return Tensor(*args, **kwargs)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-v', '--verbose', action='store_true',\n help='enable verbose output (for debugging)')\n parser.add_argument(\n '-c', '--category_num', type=int,\n help='number of object categories (obsolete)')\n parser.add_argument(\n '-m', '--model', type=str, required=True,\n help=('[yolov3-tiny|yolov3|yolov3-spp|yolov4-tiny|yolov4|'\n 'yolov4-csp|yolov4x-mish|yolov4-p5]-[{dimension}], where '\n '{dimension} could be either a single number (e.g. '\n '288, 416, 608) or 2 numbers, WxH (e.g. 
416x256)'))\n parser.add_argument(\n '--int8', action='store_true',\n help='build INT8 TensorRT engine')\n parser.add_argument(\n '--dla_core', type=int, default=-1,\n help='id of DLA core for inference (0 ~ N-1)')\n args = parser.parse_args()\n\n engine = build_engine(\n args.model, args.int8, args.dla_core, args.verbose)\n if engine is None:\n raise SystemExit('ERROR: failed to build the TensorRT engine!')\n\n engine_path = '%s.trt' % args.model\n with open(engine_path, 'wb') as f:\n f.write(engine.serialize())\n print('Serialized the TensorRT engine to file: %s' % engine_path)", "def build(self, input_tensors, is_training, lengths=None, hparams=None):", "def __init__(\n self,\n train_X: Tensor,\n train_Y: Tensor,\n train_Yvar: Optional[Tensor],\n task_feature: int,\n output_tasks: Optional[List[int]] = None,\n rank: Optional[int] = None,\n outcome_transform: Optional[OutcomeTransform] = None,\n input_transform: Optional[InputTransform] = None,\n pyro_model: Optional[PyroModel] = None,\n ) -> None:\n if not (\n train_X.ndim == train_Y.ndim == 2\n and len(train_X) == len(train_Y)\n and train_Y.shape[-1] == 1\n ):\n raise ValueError(\n \"Expected train_X to have shape n x d and train_Y to have shape n x 1\"\n )\n if train_Yvar is not None and train_Y.shape != train_Yvar.shape:\n raise ValueError(\n \"Expected train_Yvar to be None or have the same shape as train_Y\"\n )\n with torch.no_grad():\n transformed_X = self.transform_inputs(\n X=train_X, input_transform=input_transform\n )\n if outcome_transform is not None:\n train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)\n if train_Yvar is not None: # Clamp after transforming\n train_Yvar = train_Yvar.clamp(MIN_INFERRED_NOISE_LEVEL)\n\n super().__init__(\n train_X=train_X,\n train_Y=train_Y,\n train_Yvar=train_Yvar,\n task_feature=task_feature,\n output_tasks=output_tasks,\n )\n self.to(train_X)\n\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None\n self.register_buffer(\"latent_features\", None)\n if pyro_model is None:\n pyro_model = MultitaskSaasPyroModel()\n pyro_model.set_inputs(\n train_X=transformed_X,\n train_Y=train_Y,\n train_Yvar=train_Yvar,\n task_feature=task_feature,\n task_rank=rank,\n )\n self.pyro_model = pyro_model\n if outcome_transform is not None:\n self.outcome_transform = outcome_transform\n if input_transform is not None:\n self.input_transform = input_transform", "def create_ars_model(odim=10,adim=2,hdims=[128],\n actv=tf.nn.relu,out_actv=tf.nn.tanh):\n import tensorflow as tf\n \n def mlp(x,hdims=[256,256],actv=tf.nn.relu,out_actv=tf.nn.relu):\n ki = tf.truncated_normal_initializer(stddev=0.1)\n for hdim in hdims[:-1]:\n x = tf.layers.dense(x,units=hdim,activation=actv,kernel_initializer=ki)\n return tf.layers.dense(x,units=hdims[-1],\n activation=out_actv,kernel_initializer=ki)\n def placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32,shape=(None,dim) if dim else (None,))\n def placeholders(*args):\n \"\"\"\n Usage: a_ph,b_ph,c_ph = placeholders(adim,bdim,None)\n \"\"\"\n return [placeholder(dim) for dim in args]\n def get_vars(scope):\n return [x for x in tf.compat.v1.global_variables() if scope in x.name]\n \n # Have own session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n \n # Placeholders\n o_ph = placeholder(odim)\n \n # Policy \n with tf.variable_scope('main'):\n mu = mlp(o_ph,hdims=hdims+[adim],actv=actv,out_actv=out_actv)\n \n # Params\n main_vars = get_vars('main')\n 
\n model = {'o_ph':o_ph,'mu':mu,'main_vars':main_vars}\n return model, sess", "def __init__(self, model_path, alpha):\n\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.base_model = K.models.load_model(model_path)\n self.base_model.save(base_model)\n\n A_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n P_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n N_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n inputs = [A_inputs, P_inputs, N_inputs]\n outputs_embedding = self.base_model(inputs)\n \"\"\"\n P = self.base_model(P_input)\n N = self.base_model(N_input)\n \"\"\"\n tl = TripletLoss(alpha)\n output = tl(outputs_embedding)\n\n training_model = K.models.Model(inputs, output)\n training_model.compile(optimizer='Adam')\n training_model.save('training_model')", "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def build_model(cls, args, task):\n raise NotImplementedError(\"Model must implement the build_model method\")", "def build_model():\n with tf.name_scope('placeholders'):\n real_data_int = tf.placeholder(tf.int32, shape=[None, picture_size])\n x_true = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) 
- .5)\n z = tf.placeholder(tf.float32, [None, input_dim])\n if use_JL:\n JL = tf.placeholder(tf.float32, [d_last_layer_size, JL_dim])\n P_non_normalized = tf.placeholder(tf.float32, [JL_dim, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n else:\n JL = None\n P_non_normalized = tf.placeholder(tf.float32, [d_last_layer_size, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n\n x_generated = generator(z, n_features_first=n_features_first_g,\n n_features_reduction_factor=n_features_reduction_factor, min_features=64,\n BN=BN, power=power, extra_layer=extra_layer_g,\n init_method=init_method, n_features_image=n_features_image)\n\n d_pred_true, d_last_true = discriminator(x_true, reuse=False, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n d_pred_gen, d_last_gen = discriminator(x_generated, reuse=True, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n\n # define generator loss (big part taken from SWG)\n with tf.name_scope('g_loss'):\n # apply the Johnson-Lindenstrauss map, if wanted, to the flattened array\n if use_JL:\n JL_true = tf.matmul(d_last_true, JL)\n JL_gen = tf.matmul(d_last_gen, JL)\n else:\n JL_true = d_last_true\n JL_gen = d_last_gen\n\n # next project the samples (images). After being transposed, we have tensors\n # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],\n # [projected_image1_proj2, projected_image2_proj2, ...],...]\n # Each row has the projections along one direction. This makes it easier for the sorting that follows.\n # first normalize the random normal vectors to lie in the sphere\n P = tf.nn.l2_normalize(P_non_normalized, axis=0)\n\n projected_true = tf.transpose(tf.matmul(JL_true, P))\n projected_fake = tf.transpose(tf.matmul(JL_gen, P))\n\n sorted_true, true_indices = tf.nn.top_k(input=projected_true, k=batch_size)\n sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake, k=batch_size)\n\n # For faster gradient computation, we do not use sorted_fake to compute\n # loss. 
Instead we re-order the sorted_true so that the samples from the\n # true distribution go to the correct sample from the fake distribution.\n\n # It is less expensive (memory-wise) to rearrange arrays in TF.\n # Flatten the sorted_true from dim [n_projections, batch_size].\n flat_true = tf.reshape(sorted_true, [-1])\n\n # Modify the indices to reflect this transition to an array.\n # new index = row + index\n rows = np.asarray([batch_size * np.floor(i * 1.0 / batch_size) for i in range(n_projections * batch_size)])\n rows = rows.astype(np.int32)\n flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n # The scatter operation takes care of reshaping to the rearranged matrix\n shape = tf.constant([batch_size * n_projections])\n rearranged_true = tf.reshape(tf.scatter_nd(flat_idx, flat_true, shape), [n_projections, batch_size])\n\n generator_loss = tf.reduce_mean(tf.square(projected_fake - rearranged_true))\n\n # get the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)\n with tf.name_scope('SWD'):\n P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)\n\n projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))\n projected_fake_SWD = tf.transpose(tf.matmul(x_generated, P_SWD))\n\n sorted_true_SWD, true_indices_SWD = tf.nn.top_k(input=projected_true_SWD, k=batch_size)\n sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(input=projected_fake_SWD, k=batch_size)\n\n flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])\n flat_idx_SWD = tf.reshape(fake_indices_SWD, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n rearranged_true_SWD = tf.reshape(tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),\n [n_projections, batch_size])\n\n SWD = tf.reduce_mean(tf.square(projected_fake_SWD - rearranged_true_SWD))\n\n # define the discriminator loss\n with tf.name_scope('d_loss'):\n d_true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_pred_true), logits=d_pred_true)\n d_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_pred_gen), logits=d_pred_gen)\n discriminator_loss = tf.reduce_mean(d_true_loss + d_fake_loss)\n\n with tf.name_scope('g_optimizer'):\n generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n g_train = g_optimizer.minimize(generator_loss, var_list=generator_vars)\n\n with tf.name_scope('d_optimizer'):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n d_train = d_optimizer.minimize(discriminator_loss, var_list=discriminator_vars)\n\n return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train, d_train", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n left_vocab_filepath = os.path.join(args.model, 'left.vocab')\n left_vocab = Vocab(vocab_filepath=left_vocab_filepath)\n right_vocab_filepath = os.path.join(args.model, 'right.vocab')\n right_vocab = Vocab(vocab_filepath=right_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n 
map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n if checkpoint['encoder']['model_type'] == 'transformer':\n encoder = TEncoder(input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n dropout=checkpoint['encoder']['dropout'],\n num_attention_heads=checkpoint['encoder']['num_attention_heads'])\n else:\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['decoder']['model_type'] == 'transformer':\n decoder = TDecoder(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n dropout=checkpoint['decoder']['dropout'],\n num_attention_heads=checkpoint['decoder']['num_attention_heads'])\n elif checkpoint['decoder']['with_attention']:\n decoder = Attention(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n pairs = putils.convert_to_seq_pairs(args.data)\n indexed_pairs = putils.index_pairs(pairs, left_vocab.char2idx,\n right_vocab.char2idx)\n if dataset_params['reverse']:\n indexed_pairs = [(y, x) for x, y in indexed_pairs]\n source_vocab = right_vocab\n target_vocab = left_vocab\n else:\n source_vocab = left_vocab\n target_vocab = right_vocab\n if args.random > 0:\n random.shuffle(indexed_pairs)\n for seq_num in range(args.random):\n seq = indexed_pairs[seq_num]\n print('-'*80)\n input_str = ' '.join(\n ''.join([source_vocab.idx2char[idx] for idx in seq[0] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n gold_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in seq[1] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n predicted_idxx = decode(seq[0], args.itemize, encoder, decoder,\n dataset_params['max_seq_len'])\n pred_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in 
predicted_idxx\n if idx not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n print('>', input_str)\n print('=', gold_str)\n print('<', pred_str)\n else:\n _evaluate(indexed_pairs, args.itemize, encoder, decoder,\n target_vocab.idx2char, dataset_params['max_seq_len'])", "def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))", "def __init__(self, name): #, keep_axis=1):\n\n #:param int keep_axis: The axis which will be exempt from flattening (can only be one at most). If keep_axis is negative, we will count from the end.\n #self.keep_axis = keep_axis\n\n # generate the model in tensorflow\n self.name = name\n super().__init__(name=name)", "def __init__(self, inputs, outputs,\n session=tf.get_default_session, name='function'):\n self.session, self.name = session, name\n self.inputs, self.outputs = inputs, outputs", "def main():\n flags = PARSER.parse_args()\n\n if flags.to == 'savedmodel':\n to_savedmodel(input_shape=flags.input_shape,\n model_fn=unet_fn,\n src_dir=flags.checkpoint_dir,\n dst_dir='./saved_model',\n input_names=['IteratorGetNext'],\n output_names=['total_loss_ref'],\n use_amp=flags.use_amp,\n use_xla=flags.use_xla,\n compress=flags.compress)\n if flags.to == 'tensorrt':\n ds = Dataset(data_dir=flags.data_dir,\n batch_size=1,\n augment=False,\n gpu_id=0,\n num_gpus=1,\n seed=42)\n iterator = ds.test_fn(count=1).make_one_shot_iterator()\n features = iterator.get_next()\n\n sess = tf.Session()\n\n def input_data():\n return {'input_tensor:0': sess.run(features)}\n\n to_tensorrt(src_dir=flags.savedmodel_dir,\n dst_dir='./tf_trt_model',\n precision=flags.precision,\n feed_dict_fn=input_data,\n num_runs=1,\n output_tensor_names=['Softmax:0'],\n compress=flags.compress)\n if flags.to == 'onnx':\n to_onnx(src_dir=flags.savedmodel_dir,\n dst_dir='./onnx_model',\n compress=flags.compress)", "def build_model(tparams, options):\n opt_ret = dict()\n use_noise = theano.shared(numpy.asarray(1., dtype=theano.config.floatX))\n try:\n trng = RandomStreams(1234, use_cuda=True)\n except:\n print \"Could not apply use_cuda==True in RandonStreams ...\"\n trng = RandomStreams(1234)\n\n xs = []\n xmasks = []\n\n langs = options['langs']\n for lang in langs:\n # description string: #words x #samples\n x_lang = tensor.matrix('x_%s'%lang, dtype='int64')\n mask_lang = tensor.matrix('mask_%s'%lang, dtype='float32')\n xs.append(x_lang)\n xmasks.append(mask_lang)\n\n xs_r = []\n xmasks_r = []\n if options['bidirectional_enc']:\n for i,lang in enumerate(langs):\n x_lang = xs[i]\n mask_lang = xmasks[i]\n # reverse\n x_lang_r = x_lang[::-1]\n mask_lang_r = mask_lang[::-1]\n\n xs_r.append(x_lang_r)\n xmasks_r.append(mask_lang_r)\n\n sents_all = []\n im = tensor.matrix('im', dtype='float32')\n n_samples = im.shape[0]\n\n for i,lang in enumerate(langs):\n x_lang = xs[i]\n mask_lang = xmasks[i]\n\n n_timesteps_lang = x_lang.shape[0]\n n_samples_lang = x_lang.shape[1]\n\n if options['use_dropout']:\n # dropout probs for the word embeddings\n retain_probability_emb = 1-options['dropout_embedding']\n # dropout probs for the RNN hidden states\n retain_probability_hidden = 1-options['dropout_hidden']\n # dropout probs for the source words\n retain_probability_source = 1-options['dropout_source']\n # hidden states\n rec_dropout = shared_dropout_layer((2, n_samples_lang, options['dim']), use_noise, trng, retain_probability_hidden)\n rec_dropout_r = shared_dropout_layer((2, n_samples_lang, options['dim']), use_noise, trng, retain_probability_hidden)\n # word embeddings\n emb_dropout = 
shared_dropout_layer((2, n_samples_lang, options['dim_word']), use_noise, trng, retain_probability_emb)\n emb_dropout_r = shared_dropout_layer((2, n_samples_lang, options['dim_word']), use_noise, trng, retain_probability_emb)\n # source words\n source_dropout = shared_dropout_layer((n_timesteps_lang, n_samples_lang, 1), use_noise, trng, retain_probability_source)\n source_dropout = tensor.tile(source_dropout, (1,1,options['dim_word']))\n else:\n # hidden states\n rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n # word embeddings\n emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n\n # Word embedding (for a particular language `lang`)\n # forward\n emb_lang = tparams['Wemb_%s'%lang][x_lang.flatten()]\n emb_lang = emb_lang.reshape([n_timesteps_lang, n_samples_lang, options['dim_word']])\n\n if options['use_dropout']:\n emb_lang *= source_dropout\n\n if options['bidirectional_enc']:\n x_lang_r = xs_r[i]\n mask_lang_r = xmasks_r[i]\n\n # backward lang encoder\n emb_lang_r = tparams['Wemb_%s'%lang][x_lang_r.flatten()]\n emb_lang_r = emb_lang_r.reshape([n_timesteps_lang, n_samples_lang, options['dim_word']])\n\n if options['use_dropout']:\n emb_lang_r *= source_dropout[::-1]\n\n # Encode sentence in language `lang`\n if options['encoder_%s'%lang] == 'bow':\n sents_lang = (emb_lang * mask_lang[:,:,None]).sum(0)\n else:\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_%i'%(lang,i)\n # if first hidden layer use wembs, otherwise output of previous hidden layer\n layer_below=emb_lang if i==0 else layer_below[0]\n\n # do not apply dropout on word embeddings layer\n #if options['use_dropout'] and i>0:\n # layer_below = dropout_layer(layer_below, use_noise, trng, prob=options['dropout_prob'])\n\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask_lang,\n emb_dropout=emb_dropout, \n rec_dropout=rec_dropout)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_lang = layer_below\n\n # apply forward and backward steps and concatenate both\n if options['bidirectional_enc']:\n # concatenate forward and backward pass RNNs\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_r_%i'%(lang,i)\n # if first hidden layer use wembs, else output of prev hidden layer\n layer_below=emb_lang_r if i==0 else layer_below[0]\n\n # do not apply dropout on word embeddings layer\n #if options['use_dropout'] and i>0:\n # layer_below = dropout_layer(layer_below, use_noise, trng, prob=options['dropout_prob'])\n\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None,\n prefix=layer_name_prefix,\n mask=mask_lang_r,\n emb_dropout=emb_dropout_r,\n rec_dropout=rec_dropout_r)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_lang_r = layer_below\n\n # use the last state of forward + backward encoder rnns\n sents_lang = concatenate([proj_lang[0][-1], proj_lang_r[0][-1]], axis=proj_lang[0].ndim-2)\n else:\n sents_lang = proj_lang[0][-1]\n\n if options['use_dropout']:\n sents_lang *= 
shared_dropout_layer((n_samples_lang, options['dim']), use_noise, trng, retain_probability_hidden)\n\n # project sentences into multimodal space\n sents_mm = get_layer('ff')[1](tparams, sents_lang, options, prefix='ff_sentence_mm', activ='linear')\n\n if options['attention_type'] == 'dot':\n sents_mm = l2norm(sents_mm)\n\n if options['use_dropout']:\n sents_mm *= shared_dropout_layer((n_samples_lang, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n sents_all.append(sents_mm)\n\n # Encode images\n images = get_layer('ff')[1](tparams, im, options, prefix='ff_image_mm', activ='linear')\n\n if options['attention_type'] == 'dot':\n images = l2norm(images)\n\n if options['use_dropout']:\n images *= shared_dropout_layer((n_samples, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n # Compute loss\n lambda_img_sent = options['lambda_img_sent']\n lambda_sent_sent = options['lambda_sent_sent']\n if options['use_all_costs']:\n cost = contrastive_loss_all(tparams, options,\n images, sents_all, lambda_img_sent, lambda_sent_sent)\n else:\n cost = contrastive_loss(tparams, options,\n images, sents_all)\n\n # return flattened inputs\n inps = []\n inps.extend(xs)\n inps.extend(xmasks)\n inps.append(im)\n\n return trng, inps, cost", "def create_model(self):\n\t\twith tf.name_scope(\"input\"):\n\t\t\tself.user_embedding = tf.get_variable(\"user_embed\", \n\t\t\t\t[self.user_size, self.embed_size], dtype=tf.float32)\n\t\t\tself.item_embedding = tf.get_variable(\"item_embed\", \n\t\t\t\t[self.item_size, self.embed_size], dtype=tf.float32)\n\t\t\tself.user_embed = tf.nn.embedding_lookup(\n\t\t\t\t\t\t\tself.user_embedding, self.user)\n\t\t\tself.item_embed = tf.nn.embedding_lookup(\n\t\t\t\t\t\t\tself.user_embedding, self.item)\n\t\twith tf.name_scope(\"fusion\"):\n\t\t\tself.user_fusion_add = self.user_embed + self.user_feature\n\t\t\tself.item_fusion_add = self.item_embed + self.item_feature\n\n\t\t\tself.user_fusion = tf.layers.dense(inputs=self.user_fusion_add,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='user_fusion')\n\t\t\tself.item_fusion = tf.layers.dense(inputs=self.item_fusion_add,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='item_fusion')\n\n\t\twith tf.name_scope(\"attention\"):\n\t\t\tself.feature_all = tf.concat([\n\t\t\t\t\t\tself.user_fusion, self.item_fusion], -1)\n\t\t\tself.att_layer1 = tf.layers.dense(inputs=self.feature_all,\n\t\t\t\t\t\t\t\tunits=1,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='att_layer1')\n\t\t\tself.att_layer2 = tf.layers.dense(inputs=self.att_layer1,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='att_layer2')\n\t\t\tself.att_weights = tf.nn.softmax(self.att_layer2, \n\t\t\t\t\t\t\t\taxis=-1, name='att_softmax')\n\n\t\twith tf.name_scope(\"prediction\"):\n\t\t\tself.interact = self.att_weights*self.user_fusion*self.item_fusion\n\t\t\tself.interact1 = 
tf.layers.dense(inputs=self.interact,\n\t\t\t\t\t\t\t\tunits=self.embed_size,\n\t\t\t\t\t\t\t\tactivation=self.activation_func,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='interact1')\n\t\t\tself.interact1 = tf.nn.dropout(self.interact1, self.dropout)\n\t\t\tself.prediction = tf.layers.dense(inputs=self.interact,\n\t\t\t\t\t\t\t\tunits=1,\n\t\t\t\t\t\t\t\tactivation=None,\n\t\t\t\t\t\t\t\tkernel_regularizer=self.regularizer,\n\t\t\t\t\t\t\t\tname='prediction')\n\t\t\tself.prediction = tf.reshape(self.prediction, [-1])", "def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec", "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if getattr(args, \"max_source_positions\", None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n args.ddp_rank = distributed_utils.get_data_parallel_rank()\n\n src_dict, tgt_dict = task.source_dictionary, 
task.target_dictionary\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n \"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = cls.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n if getattr(args, \"offload_activations\", False):\n args.checkpoint_activations = True # offloading implies checkpointing\n\n encoder_embed_positions = (\n PositionalEmbedding(\n args.max_source_positions,\n args.encoder_embed_dim,\n src_dict.pad(),\n learned=args.encoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n decoder_embed_positions = (\n PositionalEmbedding(\n args.max_target_positions,\n args.decoder_embed_dim,\n tgt_dict.pad(),\n learned=args.decoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n if args.share_decoder_input_output_embed:\n output_projection = torch.nn.Linear(\n decoder_embed_tokens.weight.shape[1],\n decoder_embed_tokens.weight.shape[0],\n bias=False,\n )\n output_projection.weight = decoder_embed_tokens.weight\n else:\n output_projection = torch.nn.Linear(\n args.decoder_embed_dim, len(tgt_dict), bias=False\n )\n torch.nn.init.normal_(\n output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5\n )\n\n encoder = cls.build_encoder(\n args,\n encoder_embed_tokens,\n encoder_embed_positions,\n src_dict,\n )\n decoder = cls.build_decoder(\n args,\n decoder_embed_tokens,\n decoder_embed_positions,\n output_projection,\n tgt_dict,\n )\n\n if not args.share_all_embeddings:\n min_params_to_wrap = getattr(\n args, \"min_params_to_wrap\", DEFAULT_MIN_PARAMS_TO_WRAP\n )\n # fsdp_wrap is a no-op when --ddp-backend != fully_sharded\n encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)\n decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)\n return cls(args, encoder, decoder)", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def create_model(scene_dim_x, scene_dim_y, scene_dim_z):\n input_placeholder = tf.placeholder(\n tf.float32,\n shape=[1, scene_dim_z, scene_dim_y, scene_dim_x, 2],\n name='pl_scan')\n target_placeholder = tf.placeholder(\n tf.float32,\n shape=[1, scene_dim_z, scene_dim_y, scene_dim_x, 2],\n name='pl_target')\n target_lo_placeholder = tf.placeholder(\n tf.float32,\n shape=[1, scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2, 2],\n name='pl_target_lo')\n target_sem_placeholder = tf.placeholder(\n tf.uint8,\n shape=[1, scene_dim_z, scene_dim_y, scene_dim_x],\n name='pl_target_sem')\n target_sem_lo_placeholder = tf.placeholder(\n tf.uint8,\n shape=[1, scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2],\n name='pl_target_sem_lo')\n # No previous level input if at base level.\n if FLAGS.is_base_level:\n target_scan_low_resolution = None\n target_semantics_low_resolution = None\n else:\n 
target_scan_low_resolution = target_lo_placeholder\n target_semantics_low_resolution = target_sem_lo_placeholder\n logits = model.model(\n input_scan=input_placeholder,\n target_scan_low_resolution=target_scan_low_resolution,\n target_scan=target_placeholder,\n target_semantics_low_resolution=target_semantics_low_resolution,\n target_semantics=target_sem_placeholder,\n num_quant_levels=FLAGS.num_quant_levels,\n predict_semantics=FLAGS.predict_semantics,\n use_p_norm=FLAGS.p_norm > 0)\n return (input_placeholder, target_placeholder, target_lo_placeholder,\n target_sem_placeholder, target_sem_lo_placeholder, logits)", "def build_tf_graph(self):\n raise NotImplementedError", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' + args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def __init__(\n self,\n model_dir,\n model_filename=\"model.pdmodel\",\n params_filename=\"model.pdiparams\",\n precision=\"fp32\",\n use_trt=False,\n use_mkldnn=False,\n batch_size=1,\n device=\"CPU\",\n min_subgraph_size=3,\n use_dynamic_shape=False,\n cpu_threads=1,\n ):\n self.rerun_flag = False\n if device != \"GPU\" and use_trt:\n raise ValueError(\n \"Predict by TensorRT mode: {}, expect device=='GPU', but device == {}\".format(precision, device)\n )\n config = Config(os.path.join(model_dir, model_filename), os.path.join(model_dir, params_filename))\n if device == \"GPU\":\n # initial GPU memory(M), device ID\n config.enable_use_gpu(200, 0)\n # optimize graph and fuse op\n config.switch_ir_optim(True)\n else:\n config.disable_gpu()\n config.set_cpu_math_library_num_threads(cpu_threads)\n config.switch_ir_optim()\n if use_mkldnn:\n config.enable_mkldnn()\n if precision == \"int8\":\n config.enable_mkldnn_int8({\"conv2d\", \"depthwise_conv2d\", \"pool2d\", \"transpose2\", \"elementwise_mul\"})\n if precision == \"bf16\":\n 
config.enable_mkldnn_bfloat16()\n\n if precision == \"bf16\":\n config.enable_mkldnn_bfloat16()\n\n if use_trt:\n if precision == \"bf16\":\n print(\"paddle trt does not support bf16, switching to fp16.\")\n precision = \"fp16\"\n\n precision_map = {\n \"int8\": Config.Precision.Int8,\n \"fp32\": Config.Precision.Float32,\n \"fp16\": Config.Precision.Half,\n }\n assert precision in precision_map.keys()\n\n if use_dynamic_shape:\n dynamic_shape_file = os.path.join(model_dir, \"dynamic_shape.txt\")\n if os.path.exists(dynamic_shape_file):\n config.enable_tuned_tensorrt_dynamic_shape()\n print(\"trt set dynamic shape done!\")\n else:\n # In order to avoid memory overflow when collecting dynamic shapes, it is changed to use CPU.\n config.disable_gpu()\n config.set_cpu_math_library_num_threads(10)\n config.collect_shape_range_info(dynamic_shape_file)\n print(\"Start collect dynamic shape...\")\n self.rerun_flag = True\n\n if not self.rerun_flag:\n config.enable_tensorrt_engine(\n workspace_size=1 << 30,\n max_batch_size=batch_size,\n min_subgraph_size=min_subgraph_size,\n precision_mode=precision_map[precision],\n use_static=True,\n use_calib_mode=False,\n )\n\n # enable shared memory\n config.enable_memory_optim()\n self.predictor = create_predictor(config)\n self.input_handles = [self.predictor.get_input_handle(name) for name in self.predictor.get_input_names()]\n self.output_handles = [self.predictor.get_output_handle(name) for name in self.predictor.get_output_names()]\n print(\"[Paddle Inference Backend] Completed PaddleInferenceEngine init ...\")", "def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n 
testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % 
(confidence_path, name_tmp), confidence)\n\n return args.save", "def _ProjectImpl(self, tensor_names: List[Text]) -> \"TFXIO\":", "def convert_to_model(self, *args):", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": 
tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def main(args):\n with tf.Graph().as_default():\n with tf.Session() as sess:\n # prepare validate datasets\n train_x, train_y, test_x, test_y = load_dataset()\n\n # Load the modelc\n load_model(args.model)\n\n # Get input and output tensors, ignore phase_train_placeholder\n # for it have default value.\n inputs_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n\n feature_maps = tf.get_default_graph().\\\n get_tensor_by_name('MobileFaceNet/MobileFaceNet/'\n 'Conv2d_4_InvResBlock_5/Conv/Conv2D:0')\n\n feature_maps_r = tf.reshape(feature_maps, [-1, 3, 14, 14, 256])\n anchor_feature_maps = feature_maps_r[:, 0, :, :, :]\n pos_feature_maps = feature_maps_r[:, 1, :, :, :]\n neg_feature_maps = feature_maps_r[:, 2, :, :, :]\n\n anchor_labels = tf.placeholder(tf.int32, [None, 3], name='anchor_labels')\n neg_labels = tf.placeholder(tf.int32, [None, 3], name='neg_labels')\n test_labels = tf.placeholder(tf.int32, [None, 3], name='test_labels')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n anchor_logits, anchor_feature = network(feature_maps=anchor_feature_maps, keep_prob=0.5)\n pos_logits, pos_feature = network(feature_maps=pos_feature_maps, keep_prob=0.5)\n neg_logits, neg_feature = network(feature_maps=neg_feature_maps, keep_prob=0.5)\n test_logits, _ = network(feature_maps=feature_maps, keep_prob=1.0,\n is_training=False, reuse=True)\n train_accuracy = calculate_accuracy(logit=anchor_logits, label=anchor_labels,\n name='train_accuracy')\n test_accuracy = calculate_accuracy(logit=test_logits, label=test_labels,\n name='test_accuracy')\n\n with tf.name_scope(\"retrain_loss\"):\n pos_pair_loss = tf.losses.mean_squared_error(anchor_feature, pos_feature)\n temp_neg_pair_loss = tf.losses.mean_squared_error(anchor_feature, neg_feature)\n neg_pair_loss = tf.maximum(0.0, 2.0 - temp_neg_pair_loss)\n\n anchor_loss = cross_entropy_loss(anchor_logits, anchor_labels)\n pos_loss = cross_entropy_loss(pos_logits, anchor_labels)\n neg_loss = cross_entropy_loss(neg_logits, neg_labels)\n\n loss = anchor_loss + pos_loss + neg_loss + pos_pair_loss + neg_pair_loss\n\n with tf.name_scope(\"retrain_op\"): # not shown in the book\n optimizer = tf.train.GradientDescentOptimizer(learning_rate) # not shown\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=\"retrain_net\")\n train_op = optimizer.minimize(loss, var_list=train_vars)\n\n uninitialized_vars = []\n for var in tf.all_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninitialized_vars.append(var)\n\n init_new_vars_op = tf.initialize_variables(uninitialized_vars)\n sess.run(init_new_vars_op)\n\n model_path = './checkpoint/SR_model/'\n new_saver = tf.train.Saver()\n\n batch_size = 7\n n_epochs = 45\n train_iteration = len(train_x) // batch_size\n test_iteration = len(test_x) // batch_size\n\n # loop for epoch\n best_test_accuracy = 0\n epoch_lr = 0.0005\n for epoch in range(0, n_epochs):\n if epoch == int(epoch * 0.33) 
or epoch == int(epoch * 0.66):\n epoch_lr = epoch_lr * 0.1\n\n # get batch data\n for idx in range(0, train_iteration):\n batch_x = train_x[idx * batch_size:(idx + 1) * batch_size]\n batch_y = train_y[idx * batch_size:(idx + 1) * batch_size]\n\n concat_data = convert_train_data_batch(batch_x)\n anchor_label_r, neg_label_r = convert_train_label_batch(batch_y)\n\n train_feed_dict = {\n inputs_placeholder: concat_data,\n anchor_labels: anchor_label_r,\n neg_labels: neg_label_r,\n learning_rate: epoch_lr\n }\n\n _, train_accuracy_v, anchor_loss_v, pos_loss_v, neg_loss_v, pos_pair_loss_v, \\\n neg_pair_loss_v = sess.run([train_op, train_accuracy,\n anchor_loss, pos_loss, neg_loss, pos_pair_loss,\n neg_pair_loss], feed_dict=train_feed_dict)\n\n if idx % 10 == 0:\n # display training status\n print('Epoch: [%2d][%4d/%4d] Anchor Loss %.4f Pos Loss %.4f Neg Loss %.4f '\n 'Pos Pair Loss %.4f Neg Pair Loss %.4f Prec %.4f\\t'\n % (epoch, idx, train_iteration, anchor_loss_v, pos_loss_v, neg_loss_v,\n pos_pair_loss_v, neg_pair_loss_v, train_accuracy_v))\n\n total_test_accuracy = 0\n for idx in range(test_iteration):\n batch_x = test_x[idx * batch_size:(idx + 1) * batch_size]\n batch_y = test_y[idx * batch_size:(idx + 1) * batch_size]\n\n batch_x_r = convert_test_data_batch(batch_x)\n batch_y_r = convert_test_label_batch(batch_y)\n\n test_feed_dict = {\n inputs_placeholder: batch_x_r,\n test_labels: batch_y_r\n }\n\n test_accuracy_v = sess.run(test_accuracy, feed_dict=test_feed_dict)\n\n total_test_accuracy += test_accuracy_v\n total_test_accuracy /= test_iteration\n\n # display training status\n print(\"Epoch: [%2d/%2d]\\ttest_accuracy: %.2f\" \\\n % (epoch, n_epochs, total_test_accuracy))\n\n # save model\n if best_test_accuracy < total_test_accuracy:\n best_test_accuracy = total_test_accuracy\n new_saver.save(sess, os.path.join(model_path, 'SR_model_%2.4f.ckpt'\n % best_test_accuracy))", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n 
one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def main():\n args = get_arguments()\n\n # Create network.\n net = DeepLabLFOVModel()\n\n # Which variables to load.\n trainable = tf.trainable_variables()\n\n \n # Set up TF session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.initialize_all_variables()\n \n sess.run(init)\n \n # Load weights.\n saver = tf.train.Saver(var_list=trainable)\n load(saver, sess, args.model_weights)\n\n path_to_save_parmas = '../pretrained_params/'\n net.save_convs_params(sess, path_to_save_parmas)\n #writer = tf.summary.FileWriter('./deeplab_graph/', sess.graph)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def build(model_name):\n return pretrain.factory.create(model_name)", "def build_model(cls, args, task):\n global PAD_IDX, EOS_IDX\n # make sure all arguments are present in older models\n w2v_lm_architecture2(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n PAD_IDX = tgt_dict.pad()\n EOS_IDX = tgt_dict.eos()\n\n encoder = cls.build_encoder(args)\n assigner = cls.build_assigner(args, encoder.d)\n lm = cls.build_lm(args, task)\n\n return cls(args, encoder, assigner, lm)", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def __init__(self, model_name, logger=None, gpu_ids=None):\n super().__init__(model_name, 'generator', logger, gpu_ids)", "def _tf2_ ( self , *args ) :\n ##\n if not hasattr ( self , '_wo2' ) : self._wo2 = _WO2_ ( self )\n if not self._wo2 : self._wo2 = _WO2_ ( self )\n ## \n _wo = self._wo2\n fun = 
ROOT.TF2 ( funID () , _wo , *args )\n fun.SetNpx ( 100 ) \n fun.SetNpy ( 100 ) \n #\n return fun", "def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )", "def model_fn(features,labels,mode,params):\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec", "def __init__(self,\n model,\n weights,\n singa_ops,\n keep_initializers_as_inputs=True):\n super(SingaRep, self).__init__()\n self.model = model\n self.tensor_map = weights\n self.keep_initializers_as_inputs = keep_initializers_as_inputs\n # this each item of singa_ops is: ('name', 'op', 'handle', 'forward')\n # the name is a string, op is OnnxNode,\n # handle is Singa handle to store the tensor into singa operator\n # the forward is singa autograd operator\n self.singa_ops = singa_ops", "def initialize_model(\n *,\n model_def: nn.Module,\n input_spec: Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...], None]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[float]]:\n batch_size = (config.batch_size //\n jax.device_count()) if config.get('batch_size') else None\n dummy_input = []\n for spec in input_spec:\n if spec is not None:\n in_st = debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size)\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n else:\n dummy_input.append(None)\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if 
config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply, variables, train=False, debug=False, rngs=rngs),\n input_spec=count_flops.get('input_spec', input_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n else:\n gflops = None\n\n return init_params, init_model_state, num_trainable_params, gflops", "def build_model(self, constructor, args):\n dims = {'en': 300, 'es': 50}\n dists = {'en': 'Normal',\n 'es': 'Normal',}\n z_dim = args.model_args.get('z_dim', 64)\n h_dim = args.model_args.get('h_dim', 64)\n n_layers = args.model_args.get('n_layers', 3)\n gauss_out = (args.model != 'MultiDKS') \n encoders = {'en': models.common.DeepGaussianMLP(dims['en'], z_dim, h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(dims['es'], z_dim, h_dim, n_layers)}\n decoders = {'en': models.common.DeepGaussianMLP(z_dim, dims['en'], h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(z_dim, dims['es'], h_dim, n_layers)}\n custom_mods = [m for m in ['en', 'es'] if m in args.modalities]\n model = constructor(args.modalities,\n dims=(dims[m] for m in args.modalities),\n dists=[dists[m] for m in args.modalities],\n encoders={m: encoders[m] for m in custom_mods},\n decoders={m: decoders[m] for m in custom_mods},\n z_dim=z_dim, h_dim=h_dim,\n device=args.device, **args.model_args)\n return model", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (\n assignment_map,\n initialized_variable_names,\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu\n )\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op, scaffold=scaffold_fn\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, probabilities, is_real_example):\n\n logits_split = tf.split(probabilities, num_labels, axis=-1)\n label_ids_split = tf.split(label_ids, num_labels, axis=-1)\n # metrics change to auc of every class\n eval_dict = {}\n for j, logits in enumerate(logits_split):\n label_id_ = tf.cast(label_ids_split[j], dtype=tf.int32)\n current_auc, update_op_auc = tf.metrics.auc(label_id_, logits)\n eval_dict[str(j)] = (current_auc, update_op_auc)\n eval_dict[\"eval_loss\"] = tf.metrics.mean(values=per_example_loss)\n return eval_dict\n\n\n eval_metrics = metric_fn(\n per_example_loss, label_ids, probabilities, is_real_example\n )\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics,\n scaffold=scaffold_fn,\n )\n else:\n out = {\n \"input_ids\": input_ids,\n \"label_ids\": label_ids,\n }\n all_layers = model.get_all_encoder_layers()\n for (i, layer_index) in enumerate(layer_indexes):\n out[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, predictions=out, scaffold=scaffold_fn\n )\n return output_spec", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n input_mask = features[\"input_mask\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_prediction = (mode == tf.estimator.ModeKeys.PREDICT)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, is_prediction)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=per_example_loss)\n eval_metrics = {\n \"eval_accuracy\": 
accuracy,\n \"eval_loss\": loss,\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={'predict': tf.estimator.export.PredictOutput(outputs=probabilities)}\n )\n return output_spec", "def new(cls, args, src_meta, trg_meta, waitk_lagging, name=None):\n # build source and target modality\n src_modality, trg_modality = cls.build_modalities(args, src_meta, trg_meta)\n encoder_params, decoder_params = {}, {}\n for f in cls.class_or_method_args():\n if f.name in args:\n if f.name.startswith(\"encoder.\"):\n encoder_params[f.name[8:]] = args[f.name]\n elif f.name.startswith(\"decoder.\"):\n decoder_params[f.name[8:]] = args[f.name]\n # build encoder and decoder\n encoder = build_encoder({\n \"encoder.class\": \"TransformerEncoder\",\n \"encoder.params\": encoder_params})\n decoder = build_decoder({\n \"decoder.class\": \"TransformerDecoder\",\n \"decoder.params\": decoder_params})\n model = cls(args, src_meta, trg_meta, src_modality, trg_modality,\n encoder, decoder, name=name)\n model.wait_k = waitk_lagging\n _ = model({\"src\": tf.convert_to_tensor([[1, 2, 3]], tf.int64),\n \"src_padding\": tf.convert_to_tensor([[0, 0., 0]], tf.float32),\n \"trg_input\": tf.convert_to_tensor([[1, 2, 3]], tf.int64)})\n return model", "def main(args):\n # Use CUDA\n use_cuda = args.use_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Fix random seed\n torch.manual_seed(args.seed)\n\n # Generate token-to-index and index-to-token mapping\n tok2id, id2tok = data_loader.build_or_load_vocab(\n args.train, overwrite=False)\n\n print(\"*\" * 5)\n print(args)\n\n # Create DataLoader() objects\n params = {\n \"batch_size\": args.batch_size,\n \"collate_fn\": data_loader.collate_fn,\n \"shuffle\": args.shuffle,\n \"num_workers\": args.num_workers,\n }\n # train_dataset = data_loader.SNLIDataSet(args.train, tok2id)\n # train_loader = torch.utils.data.DataLoader(train_dataset, **params)\n val_dataset = data_loader.SNLIDataSet(args.val, tok2id)\n val_loader = torch.utils.data.DataLoader(val_dataset, **params)\n\n # Initialize model\n if args.model == \"rnn\": # RNN model\n model = RNN(\n vocab_size=const.MAX_VOCAB_SIZE, # Vocabulary size\n emb_dim=const.EMB_DIM, # Embedding dimensions\n hidden_dim=args.hidden_dim, # Hidden dimensions\n dropout_prob=args.dropout_prob, # Dropout probability\n padding_idx=const.PAD_IDX, # Padding token index\n num_classes=const.NUM_CLASSES, # Number of class labels\n id2tok=id2tok, # Vocabulary\n ).to(device)\n # Load model weights from disk\n model.load_state_dict(torch.load(const.MODELS + \"rnn.pt\"))\n model.eval()\n elif args.model == \"cnn\": # CNN model\n model = CNN(\n vocab_size=const.MAX_VOCAB_SIZE, # Vocabulary size\n emb_dim=const.EMB_DIM, # Embedding dimensions\n hidden_dim=args.hidden_dim, # Hidden dimensions\n kernel_size=args.kernel_size, # Kernel size\n dropout_prob=args.dropout_prob, # Dropout probability\n padding_idx=const.PAD_IDX, # Padding token index\n num_classes=const.NUM_CLASSES, # Number of class labels\n id2tok=id2tok, # Vocabulary\n ).to(device)\n # Load model weights from disk\n model.load_state_dict(torch.load(const.MODELS + \"cnn.pt\"))\n model.eval()\n else:\n print(\"Invalid model specification, exiting\")\n exit()\n\n # Criterion\n criterion = torch.nn.CrossEntropyLoss()\n # Model parameters\n params = [p for p in 
model.parameters() if p.requires_grad]\n\n # Inspect correct/incorrect predictions\n if args.inspect:\n right, wrong = eval_model(val_loader, model, device, criterion,\n inspect=True)\n print(\"\\nValidation premises with correct predictions:\\n\")\n for i, item in enumerate(right):\n text = \" \".join([id2tok[idx] for idx in item if idx > 0])\n print(\"#{}\\n {}\".format(i + 1, text))\n print(\"\\nValidation premises with incorrect predictions:\\n\")\n for i, item in enumerate(wrong):\n text = \" \".join([id2tok[idx] for idx in item if idx > 0])\n print(\"#{}\\n {}\".format(i + 1, text))\n return\n\n # Validation\n val_acc, _ = eval_model(val_loader, model, device, criterion)\n print(\"\\n Validation accuracy: {}\".format(val_acc))\n\n print(\"*\" * 5 + \"\\n\")", "def __init__(self,args,train=True,reuse=None,model=None):\n self.max_seq_len = args.max_seq_len\n self.vocab_size = args.vocab_size\n self.hidden_size = args.hidden_size\n\n initialize = model is None # whether to initialize variables\n\n # evice = \"/cpu:0\" if args.cpu else \"\"\n self.graph = tf.Graph() if model is None else model.graph\n self.session = tf.Session(graph=self.graph) \\\n if model is None else model.session\n\n with self.graph.as_default(),\\\n tf.variable_scope(\"LanguageModel\") as vs:\n self._seq = tf.placeholder(\n tf.int64,[None,self.max_seq_len])\n self._len = tf.placeholder(\n tf.int64,[None,])\n\n cell = tf.nn.rnn_cell.BasicLSTMCell(\n self.hidden_size,state_is_tuple=True)\n\n # Running RNN through sequence\n logit, _ = self.rnn_with_embedding(\n cell,None,self._seq, self._len,reuse=reuse)\n\n logit_list = tf.unpack(tf.transpose(logit,[1,0,2]))\n seq_list = tf.unpack(tf.transpose(self._seq,[1,0]))\n seq_list = seq_list[1:]\n\n xent = self.softmax_xent_loss_sequence(\n logit_list,seq_list,self._len,self.max_seq_len)\n\n self._cost = xent\n\n if train:\n log(vs.name+\"/Adding optimizer\")\n with tf.variable_scope(\"AdamOptimizer\"):\n optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n self._train_op = optimizer.minimize(self._cost)\n\n if initialize:\n log(vs.name+\"/Initializing variables\")\n self.session.run(tf.initialize_all_variables())\n\n log(\"Done with constructor.\")", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def do_new(self, args):\n model_name = questionary.text(\"Target name:\").ask()\n model_name = model_name.replace(\" \", \"\")\n\n available_frameworks = list(CFState.get_instance().loaded_frameworks.keys())\n framework_choice = questionary.select(\"Which framework?\", choices=available_frameworks).ask()\n\n if \"textattack\" in framework_choice:\n framework = \"TextTarget\"\n elif \"art\" in framework_choice:\n 
framework = \"ArtTarget\"\n else:\n raise ValueError(\"invalid framework\")\n\n if framework == \"TextTarget\":\n model_data_type = \"text\"\n elif framework == \"ArtTarget\":\n model_data_type = questionary.select(\"What data type?\", choices=[\"numpy\", \"image\"]).ask()\n else:\n raise ValueError(\"invalid framework\")\n\n if model_name not in os.listdir(config.targets_path):\n try:\n os.mkdir(f\"{config.targets_path}/{model_name}\")\n open(f\"{config.targets_path}/{model_name}/__init__.py\", \"w\").close()\n with open(f\"{config.targets_path}/{model_name}/{model_name}.py\", \"w\") as f:\n f.write(\n f\"\"\"\n\n# Generated by counterfit #\n\nfrom counterfit.core.targets import {framework}\n\nclass {model_name.capitalize()}({framework}):\n model_name = \"{model_name.lower()}\"\n model_data_type = \"{model_data_type}\"\n model_endpoint = \"\"\n model_input_shape = ()\n model_output_classes = []\n X = []\n\n def __init__(self):\n self.X = []\n\n def __call__(self, x):\n return x\n\"\"\"\n )\n\n CFState.get_instance().import_targets()\n except Exception as e:\n\n self.pwarning(f\"\\n [!] Failed to write target file: {e}.\\n\")\n\n else:\n self.pwarning(f\"\\n [!] {model_name} already exists. Choose a new name.\\n\")", "def build_model(self) -> nn.Module:\n pass", "def _tf1_ ( self , *args ) :\n #\n if not hasattr ( self , '_wo1' ) : self._wo1 = _WO1_ ( self )\n if not self._wo1 : self._wo1 = _WO1_ ( self )\n ## \n _wo = self._wo1 \n fun = ROOT.TF1 ( funID() , _wo , *args )\n fun.SetNpx ( 500 ) \n #\n return fun", "def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n 
eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def build_model_mobilenet(num_classes):", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def __init__(self, model):\r\n self._tensorflow_session = model._tensorflow_session\r\n self._model = model", "def build_model(cls, args, task):\n # from fairseq.tasks.multilingual_translation import MultilingualTranslationTask\n # assert isinstance(task, 
MultilingualTranslationTask)\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if args.share_encoders:\n args.share_encoder_embeddings = True\n\n ### nat model\n # build shared embeddings (if applicable)\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n \"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = TransformerModel.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n\n\n student_cls = ARCH_MODEL_REGISTRY[args.student_arch]\n encoder = student_cls.build_encoder(args, src_dict, encoder_embed_tokens)\n decoder = student_cls.build_decoder(args, tgt_dict, decoder_embed_tokens)\n student = student_cls(args,encoder,decoder)\n\n teacher_cls = ARCH_MODEL_REGISTRY[args.teacher_arch]\n if not issubclass(teacher_cls, NATransformerModel):\n teacher_cls = PatchedTransformerModel\n\n teacher_encoder = teacher_cls.build_encoder(\n args, src_dict,\n encoder_embed_tokens if args.share_encoder_embeddings else TransformerModel.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n )\n teacher_decoder = teacher_cls.build_decoder(\n args, tgt_dict,\n decoder_embed_tokens if args.share_decoder_embeddings else TransformerModel.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n )\n teacher = teacher_cls(args,teacher_encoder,teacher_decoder)\n\n return cls(args, student, teacher)", "def __init__(self, args, task_name, input_fields, conversion_name, cache=None, train_types_only=False):\n self.input_fields = input_fields\n self.task_name = task_name\n self.conversion_name = conversion_name\n self.conversion_dict = None\n self.name_to_index_dict = None\n self.cache = None\n self.ints_to_strings = {}\n self.label_vocab = {'[PAD]':0, '-':0, '_':0}\n self.train_types_only = train_types_only\n self.train_type_vocab = set()", "def _train_model(self):\n raise NotImplementedError()", "def get_model(point_cloud, feature_cloud, color_cloud, s1, s2, s3, s4, g1, g2, g3, g4, c1, c2, c3, c4, t1, t2, t3, t4, is_training, num_class, use_color=0, bn_decay=None):\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n l0_xyz = point_cloud\n l0_points = None\n\n if use_color == 0:\n l0_points = None\n else:\n l0_points = feature_cloud\n if use_color == 2:\n l0_cloud = TextureConv(color_cloud, is_training, bn_decay)\n l0_points = tf.concat([l0_points,l0_cloud],axis=-1)\n\n # Layer 1\n l1_xyz, l1_points = texture_geodesic_conv(s1, g1, c1, t1, l0_xyz, l0_points, npoint=1024, radius=0.1, conv_radius=0.1, conv_mlp = None, nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, 
scope='layer1', use_color=use_color)\n l2_xyz, l2_points = texture_geodesic_conv(s2, g2, c2, t2, l1_xyz, l1_points, npoint=256, radius=0.2, conv_radius=0.2, conv_mlp = None, nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2', use_color=use_color)\n l3_xyz, l3_points = texture_geodesic_conv(s3, g3, c3, t3, l2_xyz, l2_points, npoint=64, radius=0.4, conv_radius=0.4, conv_mlp = None, nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3', use_color=use_color)\n l4_xyz, l4_points = texture_geodesic_conv(s4, g4, c4, t4, l3_xyz, l3_points, npoint=16, radius=0.8, conv_radius=0.8, conv_mlp = None, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4', use_color=use_color)\n\n # Feature Propagation layers\n l3_points = texture_geodesic_tconv(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')\n l2_points = texture_geodesic_tconv(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')\n l1_points = texture_geodesic_tconv(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')\n l0_points = texture_geodesic_tconv(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')\n\n # FC layers\n net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')\n net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')\n\n return net", "def __init__(self, input_seq_batch, seq_length_batch, image_feat_batch,\n num_vocab, num_choices, module_names, dropout_keep_prob,\n scope='model', reuse=None):\n\n with tf.variable_scope(scope, reuse=reuse):\n self.T_ctrl = cfg.MODEL.T_CTRL\n\n # Input unit\n lstm_seq, q_encoding, embed_seq = input_unit.build_input_unit(\n input_seq_batch, seq_length_batch, num_vocab)\n kb_batch = input_unit.build_kb_batch(image_feat_batch)\n\n # Controller and NMN\n num_module = len(module_names)\n self.controller = controller.Controller(\n lstm_seq, q_encoding, embed_seq, seq_length_batch, num_module)\n self.c_list = self.controller.c_list\n self.module_logits = self.controller.module_logits\n self.module_probs = self.controller.module_probs\n self.module_prob_list = self.controller.module_prob_list\n self.nmn = nmn.NMN(\n kb_batch, self.c_list, module_names, self.module_prob_list)\n\n # Output unit\n if cfg.MODEL.BUILD_VQA:\n self.vqa_scores = output_unit.build_output_unit_vqa(\n q_encoding, self.nmn.mem_last, num_choices,\n dropout_keep_prob=dropout_keep_prob)\n if cfg.MODEL.BUILD_LOC:\n loc_scores, bbox_offset, bbox_offset_fcn = \\\n output_unit.build_output_unit_loc(\n q_encoding, kb_batch, self.nmn.att_last)\n self.loc_scores = loc_scores\n self.bbox_offset = bbox_offset\n self.bbox_offset_fcn = bbox_offset_fcn\n\n # Reconstruction loss\n if cfg.MODEL.REC.USE_REC_LOSS:\n rec_inputs = (self.module_logits if cfg.MODEL.REC.USE_LOGITS\n else self.module_probs)\n if cfg.MODEL.REC.USE_TXT_ATT:\n rec_inputs = tf.concat(\n [rec_inputs, tf.stack(self.c_list)], axis=-1)\n self.rec_loss = output_unit.build_output_unit_rec(\n rec_inputs, input_seq_batch, embed_seq, seq_length_batch,\n num_vocab)\n else:\n self.rec_loss = tf.convert_to_tensor(0.)\n\n self.params = [\n v for v in 
tf.trainable_variables() if scope in v.op.name]\n self.l2_reg = tf.add_n(\n [tf.nn.l2_loss(v) for v in self.params\n if v.op.name.endswith('weights')])\n\n # tensors for visualization\n self.vis_outputs = {\n 'txt_att': # [N, T, S]\n tf.transpose( # [S, N, T] -> [N, T, S]\n tf.concat(self.controller.cv_list, axis=2), (1, 2, 0)),\n 'att_stack': # [N, T, H, W, L]\n tf.stack(self.nmn.att_stack_list, axis=1),\n 'stack_ptr': # [N, T, L]\n tf.stack(self.nmn.stack_ptr_list, axis=1),\n 'module_prob': # [N, T, D]\n tf.stack(self.module_prob_list, axis=1)}\n if cfg.MODEL.BUILD_VQA:\n self.vis_outputs['vqa_scores'] = self.vqa_scores\n if cfg.MODEL.BUILD_LOC:\n self.vis_outputs['loc_scores'] = self.loc_scores\n self.vis_outputs['bbox_offset'] = self.bbox_offset", "def model_fn_builder(albert_config,num_labels,init_checkpoint,learning_rate,\n num_train_steps,num_warmup_steps,\n use_one_hot_embeddings,optimizer='adamw'):\n\n def model_fn(features,labels,mode,params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec\n\n return model_fn", "def model_fn_builder(adj_mat, w2n, n2w, bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n 
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = (?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n 
\"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec\n\n return model_fn", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def _main():\n\n # setup paths\n json_model_path = osp.join(FLAGS.input_dir, FLAGS.json_model_fname)\n weights_path = osp.join(FLAGS.input_dir, FLAGS.weights_fname)\n save_path = osp.splitext(json_model_path)[0][:-6] + \"graph_w\" + str(weights_path.split(\"_\")[-1][:-3]) + \".pb\"\n print(\"Loading Model: \" + json_model_path)\n print(\"Loading Weights: \" + weights_path)\n\n # Set keras to test phase\n k.set_learning_phase(0)\n\n # Load json and weights, then compile model\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_path)\n model.compile(loss='mse', optimizer='sgd')\n\n # Freeze graph\n frozen_graph = freeze_session(k.get_session(), output_names=[out.op.name for out in model.outputs])\n\n # Write graph to protobuf file\n tf.train.write_graph(frozen_graph, \"model\", save_path, as_text=False)\n print(\"Written Graph to: \" + save_path)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, emotion_model=os.path.join(execution_path,os.path.join(\"models\", \"emotion_model.hdf5\")),\n cascade_file: str = None,\n scale_factor: float = 1.3,\n offsets: tuple = (20, 40),\n compile: bool = False): \n self.__scale_factor = scale_factor\n self.__offsets = offsets\n self.__labels={\n 0: 'angry',\n 1: 'disgust',\n 2: 'fear',\n 3: 'happy',\n 4: 'sad',\n 5: 'surprise',\n 6: 'neutral'\n }\n \n cascade_file=os.path.join(execution_path ,os.path.join(\"models\",\"haarcascade_frontalface_default.xml\")) \n \n self.__face_detector = cv2.CascadeClassifier(cascade_file)\n\n # Local Keras model\n self.deployment = False\n config = tf.ConfigProto(log_device_placement=False)\n config.gpu_options.allow_growth = True\n self.__emotion_classifier = load_model(emotion_model, compile=compile)\n self.__emotion_classifier._make_predict_function()\n self.__emotion_target_size = self.__emotion_classifier.input_shape[\n 1:3]", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = 
features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def model_fn_builder(config):\n def model_fn(features,labels,mode,params):\n \"\"\"The model_fn for Estimator\"\"\"\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec\n return model_fn", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 #This should be able to be arbitrary\n if auto:\n 
self.real_input = 39 # This is square root of the total (its a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n #This is from each. \n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40,40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward=self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def build_model(cls, args, task):\n # make sure that all args are properly defaulted (in case there are any new ones)\n base_architecture(args)\n\n decoder_embed_dict = None\n if args.decoder_embed_path:\n decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)\n utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)\n\n out_channels = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_channels, type=int)\n kernel_sizes = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_kernel_sizes, type=int)\n strides = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_strides, type=int)\n logger.info('input feature dimension: {}, channels: {}'.format(task.feat_dim, task.feat_in_channels))\n assert task.feat_dim % task.feat_in_channels == 0\n conv_layers = ConvBNReLU(\n out_channels, kernel_sizes, strides, in_channels=task.feat_in_channels,\n ) if out_channels is not None else None\n\n fconv_encoder_input_size = task.feat_dim // task.feat_in_channels\n if conv_layers is not None:\n for stride in strides:\n if isinstance(stride, (list, tuple)):\n assert len(stride) > 0\n s = stride[1] if len(stride) > 1 else stride[0]\n else:\n assert isinstance(stride, int)\n s = stride\n fconv_encoder_input_size = (fconv_encoder_input_size + s - 1) // s\n fconv_encoder_input_size *= out_channels[-1]\n\n encoder = SpeechFConvEncoder(\n conv_layers_before=conv_layers,\n input_size=fconv_encoder_input_size,\n embed_dim=args.encoder_embed_dim,\n convolutions=eval(args.encoder_layers),\n dropout=args.dropout,\n )\n decoder = SpeechFConvDecoder(\n dictionary=task.target_dictionary,\n embed_dim=args.decoder_embed_dim,\n embed_dict=decoder_embed_dict,\n convolutions=eval(args.decoder_layers),\n out_embed_dim=args.decoder_out_embed_dim,\n attention=eval(args.decoder_attention),\n dropout=args.dropout,\n max_positions=args.max_target_positions,\n share_embed=args.share_input_output_embed,\n positional_embeddings=args.decoder_positional_embed,\n )\n return cls(encoder, decoder)", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n if FLAGS.is_train: \n iterations=120\n model_input = 
utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n # iterations=50\n # model_input=model_input[:,20:-30:5,:]\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n # print('model_input is', model_input)\n # print('vocab_size is',vocab_size)\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = AttentionLayers(1024,iterations,256)#256\n audio_attention = AttentionLayers(128,iterations,256/4)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n # print('vlad_video is',vlad_video)\n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n return aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)", "def __init__(self, input_tensor_spec):\n self._input_tensor_spec = input_tensor_spec\n super().__init__()", "def build_model(cls, args, task):\n # print(\"In build_model !!!\")\n default_architecture(args)\n assert args.load_hf_bert_from != ''\n encoder = HuggingFaceBertEncoder(args, task.dictionary)\n\n return cls(args, encoder, task)", "def model_fn_builder():\n \n def model_fn(features, labels, mode, params):\n # features name and shape\n _info('*** Features ****')\n for name in sorted(features.keys()):\n tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n # get data\n input_x = features['input_x']\n input_mask = features['input_mask']\n if is_training:\n input_y = features['input_y']\n seq_length = features['seq_length']\n else:\n input_y = None\n seq_length = None\n\n # build encoder\n model = BertEncoder(\n config=cg.BertEncoderConfig,\n is_training=is_training,\n input_ids=input_x,\n input_mask=input_mask)\n embedding_table = model.get_embedding_table()\n encoder_output = tf.reduce_sum(model.get_sequence_output(), axis=1)\n\n # build decoder\n decoder_model = Decoder(\n config=cg.DecoderConfig,\n is_training=is_training,\n encoder_state=encoder_output,\n embedding_table=embedding_table,\n decoder_intput_data=input_y,\n seq_length_decoder_input_data=seq_length)\n logits, sample_id, ppl_seq, ppl = decoder_model.get_decoder_output()\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {'sample_id': sample_id, 'ppls': ppl_seq}\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN:\n max_time = 
ft.get_shape_list(labels, expected_rank=2)[1]\n target_weights = tf.sequence_mask(seq_length, max_time, dtype=logits.dtype)\n batch_size = tf.cast(ft.get_shape_list(labels, expected_rank=2)[0], tf.float32)\n\n loss = tf.reduce_sum(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) * target_weights) / batch_size\n\n learning_rate = tf.train.polynomial_decay(cg.learning_rate,\n tf.train.get_or_create_global_step(),\n cg.train_steps / 100,\n end_learning_rate=1e-4,\n power=1.0,\n cycle=False)\n\n lr = tf.maximum(tf.constant(cg.lr_limit), learning_rate)\n optimizer = tf.train.AdamOptimizer(lr, name='optimizer')\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=cg.colocate_gradients_with_ops)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n\n\n # this is excellent, because it could display the result each step, i.e., each step equals to batch_size.\n # the output_spec, display the result every save checkpoints step.\n logging_hook = tf.train.LoggingTensorHook({'loss' : loss, 'ppl': ppl, 'lr': lr}, every_n_iter=cg.print_info_interval)\n\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])\n elif mode == tf.estimator.ModeKeys.EVAL:\n # TODO\n raise NotImplementedError\n \n return output_spec\n \n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n bert_config = modeling.BertConfig.from_json_file(bert_config_file)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids,\n num_labels, False)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = []\n print('****TRYING TO LOAD FROM INIT CHECKPOINT %s****' % init_checkpoint)\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n print(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n print(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs\n })\n\n return output_spec", "def main(unused_argv):\n model_params = sketch_rnn_model.get_default_hparams()\n if FLAGS.hparams:\n model_params.parse(FLAGS.hparams)\n trainer(model_params)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = create_model(\n bert_config=bert_config,\n 
is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"prediction\": tf.argmax(logits, axis=-1),\n }\n output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n else:\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=3, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n label = features[\"label\"]\n\n loss = compute_loss(logits, label)\n predicted_classes = tf.argmax(logits, axis=-1)\n accuracy = tf.metrics.accuracy(labels=label, predictions=predicted_classes, name='acc_op')\n\n # global global_acc_list\n # global_acc_list.append(accuracy)\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. 
Note that the pro-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn", "def __init__(self, pkl_path, joint_type='cocoplus', dtype=tf.float32):\n # -- Load SMPL params --\n with open(pkl_path, 'r') as f:\n dd = pickle.load(f) \n # Mean template vertices\n self.v_template = tf.Variable(\n undo_chumpy(dd['v_template']),\n name='v_template',\n dtype=dtype,\n trainable=False)\n # Size of mesh [Number of vertices, 3]\n self.size = [self.v_template.shape[0].value, 3]\n self.num_betas = dd['shapedirs'].shape[-1]\n # Shape blend shape basis: 6980 x 3 x 10\n # reshaped to 6980*30 x 10, transposed to 10x6980*3\n shapedir = np.reshape(\n undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T\n self.shapedirs = tf.Variable(\n shapedir, name='shapedirs', dtype=dtype, trainable=False)\n\n # Regressor for joint locations given shape - 6890 x 24\n self.J_regressor = tf.Variable(\n dd['J_regressor'].T.todense(),\n name=\"J_regressor\",\n dtype=dtype,\n trainable=False)\n\n # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*30 x 207\n num_pose_basis = dd['posedirs'].shape[-1]\n # 207 x 20670\n posedirs = np.reshape(\n undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T\n self.posedirs = tf.Variable(\n posedirs, name='posedirs', dtype=dtype, trainable=False)\n\n # indices of parents for each joints\n self.parents = dd['kintree_table'][0].astype(np.int32)\n\n # LBS weights\n self.weights = tf.Variable(\n undo_chumpy(dd['weights']),\n name='lbs_weights',\n dtype=dtype,\n trainable=False)\n\n # This returns 19 keypoints: 6890 x 19\n self.joint_regressor = tf.Variable(\n dd['cocoplus_regressor'].T.todense(),\n name=\"cocoplus_regressor\",\n dtype=dtype,\n trainable=False)\n if joint_type == 'lsp': # 14 LSP joints!\n self.joint_regressor = self.joint_regressor[:, :14]\n\n if joint_type not in ['cocoplus', 'lsp']:\n print('BAD!! Unknown joint type: %s, it must be either \"cocoplus\" or \"lsp\"' % joint_type)\n import ipdb\n ipdb.set_trace()", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def init_tensors(self, sample, *args):\n raise NotImplementedError" ]
[ "0.67856354", "0.6725397", "0.6558513", "0.65427756", "0.6461063", "0.64487284", "0.63853717", "0.6380155", "0.6334473", "0.63143593", "0.62861764", "0.6284722", "0.6238022", "0.62292343", "0.6205176", "0.6196909", "0.6196012", "0.61855704", "0.6158863", "0.61315936", "0.6114675", "0.6085014", "0.60655206", "0.6055188", "0.6049067", "0.60445416", "0.60302657", "0.6014607", "0.6008279", "0.60066134", "0.6005976", "0.6005338", "0.60011613", "0.59996986", "0.59936076", "0.5989531", "0.5988167", "0.59861606", "0.59858096", "0.59658843", "0.5962221", "0.5951206", "0.5950305", "0.5948262", "0.5940291", "0.59347624", "0.59341013", "0.5930249", "0.5927861", "0.5921328", "0.5918885", "0.5912987", "0.59122497", "0.5910063", "0.58976513", "0.5894516", "0.58898926", "0.5887741", "0.5883058", "0.5880978", "0.58734435", "0.58687097", "0.58606255", "0.5859897", "0.585927", "0.585884", "0.5851366", "0.58509743", "0.5848814", "0.58479184", "0.58303225", "0.5829106", "0.58270127", "0.58184695", "0.58116716", "0.5796363", "0.57961595", "0.5790042", "0.57888544", "0.5788784", "0.57836086", "0.5782435", "0.5780871", "0.57791823", "0.57773596", "0.5775049", "0.5773353", "0.5771105", "0.5767814", "0.5766412", "0.576285", "0.5757329", "0.5756639", "0.5751006", "0.5746874", "0.57454574", "0.5745176", "0.5743617", "0.57432854", "0.5736373", "0.5731714" ]
0.0
-1
Return parameter details for dim, nIter, lamb and alph
def paramDetails(cls):
    return {
        'dim': (10, 20, 2, 20),
        'nIter': (1, 10, 2, 5),
        'lamb': (.1, 1., .1, .05),
        'alph': (30, 50, 5, 40)
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setParams(self,dim=20, nIter=5, lamb=0.05, alph=40):\n self._dim = dim\n self._nIter = nIter\n self._lamb = lamb\n self._alph = alph", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def params(self):\n return {'out_dim': self.out_dim,\n 'act_fn': self.act_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def inspect_state(self):\n for name in self._param_store.get_all_param_names():\n self._logger.info(\"Param [%s]: %r\", name,\n pyro.param(name).data.numpy())", "def get_pars(self):\n return [self.z, self.b, self.logN]", "def param_info():\n\n\tgizmo_names = syn.getGizmoNames()\n\n\tfor gizmo in gizmo_names:\n\t\tparams = syn.getParameterNames(gizmo)\n\t#doesnt get all parameters from gizmos i.e. WaveFreq\n\n\t# get all info on the 'WaveFreq' parameter\n\tGIZMO = 'aStim2'\n\tPARAMETER = 'WaveFreq'\n\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # get the array size (should be 100)\n\t# sz = syn.getParameterSize(GIZMO, PARAMETER)\n\t#\n\t# # write values 1 to 50 in second half of buffer\n\t# result = syn.setParameterValues(GIZMO, PARAMETER, np.arange(1, 51), 50)\n\t#\n\t# # read all values from buffer\n\t# syn.getParameterValues(GIZMO, PARAMETER, sz)\n\t#\n\t# # get all info on the 'Go' parameter\n\t# PARAMETER = 'Go'\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # flip the switch\n\t# result = syn.setParameterValue(GIZMO, PARAMETER, 1)\n\t#\n\t# # check the value\n\tfreq = syn.getParameterValue(GIZMO, PARAMETER)\n\tprint('value =', freq)\n\tfreq = [freq]\n\n\t# also verify visually that the switch slipped in the run\n\t# time interface. This state change will be logged just\n\t# like any other variable change and saved with the runtime\n\t# state.\n\n\tnumTrials = 5 #total number of trials across stimuli\n\tISI = [2.0, 3.0, 4.0, 5.0] # ISI in seconds\n\n\t# flash parameters\n\tflash_dur = [.001] # flash durs in seconds (100 ms, 200 ms)\n\tluminance = [[1, 1, 1], [.86, .86, .86], [0, .1, 1]] # white , grayish, purple just for testing\n\n\t# auditory parameters\n\tduration = [.005] # in seconds; pulseDur in TDT\n\tsound_levels = [20.0, 40.0, 60.0, 80.0] # dB; waveAmp in TDT\n\n\t# Auditory on (T/F? 
if T then A+V, if F then Visual only)\n\tstims = {0: \"auditory_only\",\n\t\t\t 1: \"visual_only\",\n\t\t\t 2: \"A+V\"\n\t\t\t }\n\n\texper = Experiment(numTrials=numTrials, ISI=ISI, flash_dur=flash_dur, luminance=luminance, wave_freq=freq,\n\t\t\t\t\t pulse_dur=duration, wave_amp=sound_levels, stimulus=stims)\n\texper.run_experiment()", "def getParam(self):\n return self.__alpha0, self.__alpha1, self.__beta, self.__eta", "def params(self) -> Iterable[sympy.Symbol]:\n for i in range(self.iterations):\n for p in range(len(self.qubits)):\n if (self.include_all_z or not\n numpy.isclose(self.orbital_energies[p], 0)):\n yield LetterWithSubscripts('U', p, i)\n for p, q in itertools.combinations(range(len(self.qubits)), 2):\n if (self.include_all_cz or not\n numpy.isclose(self.hamiltonian.two_body[p, q], 0)):\n yield LetterWithSubscripts('V', p, q, i)", "def give_block_param_info(self):\n\n nb_block=0\n nb_values=[]\n k=0\n while 1:\n k+=1\n try:\n self.info['mw_parameter'][str(10*k+1)]\n except:\n break\n nb_block+=1\n if type(self.info['mw_parameter'][str(10*k+3)])==list:\n nb_values.append(len(self.info['mw_parameter'][str(10*k+3)]))\n else:\n nb_values.append(1)\n\n return nb_block,nb_values", "def parameters(self):", "def param(self,name,i):\n state = self.getstate(name)\n x,C = state.vec,state.cov\n cc = C[i,i]\n if (cc>0.): cc=sqrt(cc)\n xx,cc = x[i],cc\n debug('kfnode.param ',(name,xx,cc))\n return xx,cc", "def _determine_parameters(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Parameter(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Parameter(var, dim)", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def lattice_parameters(self):\n return self.a, self.b, self.c, self.alpha, self.beta, self.gamma", "def get_layer_var_names(self):\n return(self.params)", "def parameters(self):\n return self.pars", "def params(dim):\r\n m = 3\r\n s = 1\r\n q = 2 ** (m - 1)\r\n while s < dim:\r\n m += 1\r\n s = m + math.factorial(m - 1) / (2 * math.factorial(m - 3))\r\n q = 2 ** (m - 1)\r\n\r\n return (\r\n m, q, s - dim)", "def getParams(self):\n return self.trainError, self.trainAcc, self.w", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def get_params(self):", "def parameters(self):\n params = []\n for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.dense1, self.dense2):\n params += list(layer.parameters)\n return params", "def get_hyperparams(self):", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['method'] = self.method\n paramDict['dimension'] = self.dimension\n paramDict['rank'] = self.rank\n paramDict['mu'] = self.mu\n paramDict['covariance'] = self.covariance\n return paramDict", "def params_desc(self):\n return 
\"{}/{}/{}/{}\".format(\n self.learning_rate, self.movement, self.milestones, self.gamma\n )", "def get_parameters(self):\n return self.sess.run(self.A_symm)", "def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}", "def _get_parameters(self):\n return (self.SYMBOL, self.parameterArray())", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def get_params(self) -> np.array:\n pass", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def params(self):\n return self._pars", "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'output_shape': self.output_shape,\n 'w_init_fn': self.w_init_fn,\n 'use_bias': self.use_bias,\n 'idx': self.idx}", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParams(self):\n return self.W, self.b", "def param(self):\r\n paramlist = []\r\n gradlist = []\r\n\r\n for layer in self.layers:\r\n try:\r\n layer_param, layer_grad = layer.param()\r\n paramlist = paramlist + layer_param\r\n gradlist = gradlist + layer_grad\r\n except ValueError:\r\n continue\r\n return paramlist, gradlist", "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'w_init_fn': self.w_init_fn,\n 'resize': self.resize,\n 'use_bias': self.use_bias,\n 'atrous': self.atrous,\n 'idx': self.idx}", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def print_params_shape(scope, param_type):\n shapes = [var.get_shape().as_list() for var in get_trainable_vars(scope)]\n nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in shapes])\n print(' {} shapes: {}'.format(param_type, shapes))\n print(' {} params: {}'.format(param_type, nb_params))", "def num_params(self):", "def _get_params(self):\r\n return self.k._get_params()", "def getParameters(self):\n\t\td = AdaptiveBatAlgorithm.getParameters(self)\n\t\td.update({\n\t\t\t'A_l': self.A_l,\n\t\t\t'A_u': self.A_u,\n\t\t\t'r_l': self.r_l,\n\t\t\t'r_u': self.r_u,\n\t\t\t'tao_1': self.tao_1,\n\t\t\t'tao_2': self.tao_2\n\t\t})\n\t\treturn d", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n 
self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def get_params(self):\n return self.arr", "def printParameters(self):\n print(\"----------Model Parameters----------\")\n print(\"Initial Conv. 
Depth : \" + str(self.conv_depth))\n print(\"Number of Classes : \" + str(self.n_classes))\n print(\"Dropout : \" + str(self.dropout))\n print(\"Activation Function : Relu\")\n print(\"Input Shape : \" + str(self.input_shape))\n print(\"Batch Size : \" + str(self.batch_size))\n print(\"--------Optimizer Parameters--------\")\n print(\"Learning Rate : \" + str(self.optimizer.lr))\n print(\"Momentum : \" + str(self.optimizer.momentum))\n print(\"Initial Decay : \" + str(self.optimizer.initial_decay))", "def parameter_names(self) -> List[str]:", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def params():\n raise NotImplementedError", "def get_params(self):\n return self.w, self.b", "def parameters(self):\n pass", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def parameters(self):\n return NeuralNetwork.flatten([module.parameters() for module in self.modules])", "def parameters(self):\n return self.vars", "def __init__(self, N, n):\n #adam params\n self.beta_1 = 0.9\n self.beta_2 = 0.999\n self.epsilon = 1e-8\n #not adam params\n self.arr = np.zeros((N, 2*n+1)) #param array\n self.max = sum([psi.lbd(p + 1, gamma, n) for p in range(n)])\n self.loss_memory = []\n self.eta_memory = []\n self.grad_memory = []\n self.N = N\n self.n = n", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def printfunc(self, params, iter, resid, *args, **kwargs):\n\n print(iter) \n print(params.valuesdict())", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, \"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def algorithm_parameters(alg):\n if alg in list(SEM_TYPE.keys()):\n return simulate_parameters(alg)\n\n param_dict = dict()\n\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].__init__)\n if param is not None:\n param_len = len(param.args)\n if param.defaults:\n if 'input_dim' in param.args:\n param_dict.update({'input_dim': None})\n for index, value in enumerate(reversed(param.defaults)):\n if not isfunction(value) and (value is not None):\n param_dict.update(\n {param.args[param_len - index - 1]: value})\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].learn)\n if param is not None:\n param_len = len(param.args)\n if param_len > 2:\n if 'rank' in param.args:\n param_dict.update({'rank': None})\n return param_dict", "def get_params(self):\n raise NotImplementedError", "def param_init(self, sig=0.01):\n self.rhos = np.ones(self.Ndim)\n self.a = np.random.rand(self.Ndim, self.Nhidden)\n self.c = np.random.rand(self.Nhidden)\n self.W = np.random.randn(self.Nhidden, self.Ndim) * sig\n self.alphas = np.zeros((self.Ndim, self.Ncomponents))\n self.mus = np.zeros((self.Ndim, self.Ncomponents))\n self.sigmas = np.zeros((self.Ndim, self.Ncomponents))\n self.optimize_params = [self.rhos, self.c, self.W]\n\n types = ['alpha', 'mu', 'sigma']\n self.bs = {}\n self.Vs = {}\n for t in types:\n self.bs[t] = np.random.randn(self.Ndim, self.Ncomponents) * sig\n self.Vs[t] = np.random.randn(self.Ndim, self.Nhidden,\n self.Ncomponents) * sig\n self.optimize_params.append(self.bs[t])\n self.optimize_params.append(self.Vs[t])", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict[f'OutStream Available {self.dim}D :'] = 
self.availableOutStreamTypes[self.dim]\n paramDict['Plot is '] = str(self.dim) + 'D'\n for index in range(len(self.sourceName)):\n paramDict['Source Name ' + str(index) + ' :'] = self.sourceName[index]\n\n return paramDict", "def getParameters(self): #$NON-NLS-1$\r", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']", "def get_parameters(self):\n return self.sess.run(self.A)", "def _get_params_summary(self, alpha=0.1):\n\n # TODO: Acknowledge that this code was modified from the statsmodels package\n\n results = self._model.fit()\n\n def forg(x, prec=3):\n if prec == 3:\n # for 3 decimals\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%9.3g' % x\n else:\n return '%9.3f' % x\n elif prec == 4:\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%10.4g' % x\n else:\n return '%10.4f' % x\n else:\n raise NotImplementedError\n\n # Parameters part of the summary table\n conf_int = results.conf_int(alpha)\n\n # Dictionary to store the header names for the parameter part of the\n # summary table. look up by modeltype\n alp = str((1 - alpha) * 100) + '%'\n\n param_header = ['coef', 'std err', 't', 'P>|t|',\n '[' + alp + ' Conf. Int.]']\n\n xname = self._model.exog_names\n\n params_stubs = xname\n\n exog_idx = range(len(xname))\n\n # center confidence intervals if they are unequal lengths\n confint = [\"%s %s\" % tuple(map(forg, conf_int.ix[i])) for i in exog_idx]\n len_ci = list(map(len, confint))\n max_ci = max(len_ci)\n min_ci = min(len_ci)\n\n if min_ci < max_ci:\n confint = [ci.center(max_ci) for ci in confint]\n\n # explicit f/g formatting, now uses forg, f or g depending on values\n params_data = zip([forg(results.params[i], prec=4) for i in exog_idx],\n [forg(results.bse[i]) for i in exog_idx],\n [forg(results.tvalues[i]) for i in exog_idx],\n # [\"%#6.3f\" % (results.pvalues[i]) for i in exog_idx],\n [\"%#6.3g\" % (results.pvalues[i]) for i in exog_idx],\n confint\n )\n params_data = list(params_data)\n parameter_table = SimpleTable(params_data,\n param_header,\n params_stubs,\n txt_fmt=fmt_params\n )\n\n if results.params.shape[0] > 2:\n vif_table = self._get_vif_table()\n parameter_table.extend_right(vif_table)\n\n return parameter_table", "def parameterNode(self):\r\n # framework\r\n profbox()\r\n return self.parameterNode", "def parameters(self):\n return []", "def _LayerParams(ii):\n if isinstance(p.transformer_layer_params_tpl, list):\n factor = p.num_layers // len(p.transformer_layer_params_tpl)\n i = ii // factor\n p_ii = p.transformer_layer_params_tpl[i].Copy()\n else:\n p_ii = p.transformer_layer_params_tpl.Copy()\n p_ii.name = 'layer_%d' % ii\n p_ii.has_aux_atten = p.has_aux_atten\n p_ii.mask_self_atten = p.mask_self_atten\n p_ii.input_dim = p.mdl_dim or p_ii.input_dim\n p_ii.output_dim = p.mdl_dim or p_ii.output_dim\n p_ii.packed_input = p.packed_input\n if (not isinstance(p_ii.tr_atten_tpl.num_heads, list) and\n p.num_atten_heads is not None):\n p_ii.tr_atten_tpl.num_heads = p.num_atten_heads\n if p.dropout_prob is not None:\n p_ii.tr_atten_tpl.atten_dropout_prob = p.dropout_prob\n p_ii.tr_atten_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.residual_dropout_prob = p.dropout_prob\n p_ii.tr_fflayer_tpl.relu_dropout_prob = p.dropout_prob\n if p.stochastic_depth_droppath_prob is not None:\n ratio = p.stochastic_depth_droppath_prob * ii / (p.num_layers - 1)\n p_ii.tr_atten_tpl.residual_droppath_prob = ratio\n p_ii.tr_fflayer_tpl.residual_droppath_prob = ratio\n if p.hidden_dim is not None:\n p_ii.tr_fflayer_tpl.hidden_dim 
= p.hidden_dim\n p_ii.tr_atten_tpl.add_unnormalized_input = p.add_unnormalized_input\n if ii in p.moe_layers:\n p_ii.tr_fflayer_tpl = _MoeLayerParams(p_ii.tr_fflayer_tpl)\n return p_ii", "def get_parameters(self):\n params = {\"train_frac\": self.train_frac, \"split_alg\": self.split_alg,\n \"nw_name\": self._nw_name, \"split_id\": self.split_id}\n return params", "def params(self):\n return {'shape': self.shape,\n 'name': self.name}", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict", "def getstate(self):\r\n return Parameterized.getstate(self) + [self.parts,\r\n self.num_parts,\r\n self.num_params,\r\n self.input_dim,\r\n self.input_slices,\r\n self.param_slices\r\n ]", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def display_layer_parameters(self):\n pprint.pprint(vars(self))\n return", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar\n paramDict['k' ] = self.k\n paramDict['low' ] = self.low\n return paramDict", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def parameters(self):\n return self._params", "def get_parameters(self) -> Dict[str, ParameterInfo]:\n parameter_info_list = {}\n\n for associated_op in self.associated_ops:\n word_tensor = self._get_word_tensor(associated_op)\n position_tensor = self._get_position_tensor(associated_op)\n token_tensor = self._get_token_tensor(associated_op)\n\n for param_tensor in [word_tensor, position_tensor, token_tensor]:\n op_with_param = None\n for consumer in param_tensor.consumers():\n if not consumer.name.startswith('gradients/'):\n assert op_with_param is None\n op_with_param = consumer\n assert op_with_param is not None\n parameter_info_list[param_tensor.op.name] = ParameterInfo('weight', [op_with_param.name])\n\n return parameter_info_list", "def print_all_params(self, disp=True):\n descriptions = {'general': {}}\n for name, param in self.params.items():\n descriptions['general'][name] = param.get_description()\n\n for comp, comp_obj in self.components.items():\n descriptions[comp] = {}\n for name in comp_obj.get_params():\n descriptions[comp][name] = comp_obj.get_param_description(name)\n return self._print_params(descriptions, disp)", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'vstar', 'gain', 'ft']", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def _mn_par_ ( self , i ) :\n if not i in self : raise IndexError\n #\n val = ctypes.c_double ( 0 ) \n err = ctypes.c_double ( 0 ) \n #\n res = self.GetParameter ( i , val , err )\n #\n val = float ( val.value )\n err = float ( err.value )\n #\n return VE ( val , err*err )", "def Get_params(numparams, dt, D):\n # bounds from table 1 Kowalek et al 2020\n Nmin, Nmax = 30, 600\n Bmin, Bmax = 1, 6\n Rmin, Rmax = 1, 17\n alphamin, alphamax = 0.3, 0.7\n Qmin, Qmax = 1, 9\n\n # Gen parameters\n Q = np.random.uniform(Qmin, Qmax, size=numparams)\n Q1, Q2 = Q, Q\n\n NsND = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsAD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsCD = np.random.randint(Nmin, Nmax + 1, size=numparams)\n NsDM = np.random.randint(Nmin, Nmax + 1, size=numparams)\n TDM = NsDM * dt\n\n B = np.random.uniform(Bmin, Bmax, size=numparams)\n r_c = np.sqrt(D * NsCD * dt / B) # solving for r_c in eq. 8 Kowalek\n\n R = np.random.uniform(Rmin, Rmax, size=numparams)\n v = np.sqrt(R * 4 * D / TDM) # solving for v in eq. 7 Kowalek\n\n alpha = np.random.uniform(alphamin, alphamax, size=numparams)\n\n # Compute sigma for ND, AD, CD from eq. 12 Kowalek\n sigmaND = np.sqrt(D * dt) / Q1\n sigmaAD = np.sqrt(D * dt) / Q1\n sigmaCD = np.sqrt(D * dt) / Q1\n\n # Compute sigma for DM from eq. 
12 Kowalek\n sigmaDM = np.sqrt(D * dt + v ** 2 * dt ** 2) / Q2\n\n return np.array(\n [\n NsND,\n NsAD,\n NsCD,\n NsDM,\n D * np.ones(numparams),\n dt * np.ones(numparams),\n r_c,\n v,\n alpha,\n sigmaND,\n sigmaAD,\n sigmaCD,\n sigmaDM,\n ]\n ).T", "def get_params(self, deep=True):\n #params = dict(kernel=self.kernel, dim=self.dim)\n params = dict(columns=self.columns)\n if deep:\n for i, kernel in enumerate(self.kernels):\n print(\"--->\", \"\\ti = \", i, \"\\tkernel = \", kernel)\n deep_items = kernel.get_params().items()\n #params.update((k, val) for k, val in deep_items)\n for k, val in deep_items:\n print(\"\\tkey = \", k, \"\\tvalue = \", val)\n params.update(('k{}__{}'.format(i, k), val) for k, val in deep_items)\n return params", "def getParameters(self):\n return {'channels':self._channels, 'means':self._means, 'stds':self._stds}", "def print_trainable_params(scope=None):\n n_params = 0\n print('name \\t| shape \\t| num parameters')\n\n for var in tf.trainable_variables(scope):\n # shape is an array of tf.Dimension\n shape = var.get_shape()\n n_elems = shape.num_elements()\n print(var.name, shape, n_elems)\n n_params += n_elems\n\n print('Total parameters:', n_params)", "def parameters(self):\n return [o.parameters for o in self.obs]", "def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['lambda'] = self.lambdaVar # rate parameter\n paramDict['low' ] = self.low # lower domain boundary\n return paramDict", "def get_params(self, deep = True, bounds = True):\n params = dict() \n for p in self._LIST_PARAMETERS:\n params[p] = self._get_one_param(p)\n if(bounds):\n params[p + '_bounds'] = self._get_one_bound(p)\n if(deep and self._FLAG_TYPE == 'collection' and p == 'list_func'):\n for n, sub_obj in enumerate(params[p]):\n sub_params = sub_obj.get_params(deep, bounds)\n params.update({'f' + str(n) + '__' + key: val for key, val in sub_params.items()})\n \n return params" ]
[ "0.66055083", "0.65783185", "0.6249512", "0.60537255", "0.6009132", "0.60000795", "0.599074", "0.5983164", "0.59602505", "0.59599835", "0.59563273", "0.5954735", "0.5952508", "0.59498894", "0.59248936", "0.5918452", "0.5881164", "0.58748", "0.5865431", "0.5844908", "0.5821393", "0.58213127", "0.58106035", "0.58074", "0.57923317", "0.57842344", "0.5756075", "0.5753663", "0.5751509", "0.5746594", "0.57446456", "0.5740447", "0.57372224", "0.5724931", "0.5690531", "0.56810194", "0.5676005", "0.5664775", "0.5645824", "0.5645824", "0.56443864", "0.564181", "0.56337863", "0.5606107", "0.55908024", "0.55841166", "0.558074", "0.55753374", "0.5570626", "0.5555742", "0.5540007", "0.55373496", "0.55332404", "0.55177104", "0.55013925", "0.5488639", "0.5486654", "0.5479328", "0.54763126", "0.5474097", "0.5470791", "0.54549825", "0.5442651", "0.5442638", "0.54374", "0.5437281", "0.54298836", "0.5426299", "0.5418472", "0.54146934", "0.5414652", "0.5412961", "0.5408276", "0.5399377", "0.5389025", "0.5386802", "0.53856725", "0.53846276", "0.5381253", "0.5381108", "0.5377615", "0.5369272", "0.5369213", "0.53682196", "0.53675324", "0.5367388", "0.53629434", "0.5361157", "0.5361157", "0.53603244", "0.5359162", "0.5358729", "0.5354467", "0.53512985", "0.5350586", "0.53493637", "0.53474325", "0.5344251", "0.5341885", "0.5336419" ]
0.74654466
0
Java Float Matrix is a 1D array written column after column. Numpy reads row after row, therefore, we need a conversion.
def _float_matrix2numpy(self, java_float_matrix):
    columns_input = java_float_matrix.toArray()
    split = lambda lst, sz: [numpy.fromiter(lst[i:i+sz],dtype=numpy.float) for i in range(0, len(lst), sz)]
    cols = split(columns_input, java_float_matrix.rows)
    matrix = numpy.ma.column_stack(cols)
    return matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrixToFloatMatrix(matrix):\n\n float_matrix = [matrix(i, j) for i in xrange(4) for j in xrange(4)]\n\n outMatrix = OpenMaya.MFloatMatrix()\n OpenMaya.MScriptUtil.createFloatMatrixFromList(float_matrix , outMatrix)\n\n return outMatrix", "def itemsToFloat(self):\n returnvalue = Matrix()\n for row in self._value:\n newRow = list()\n for item in row:\n newRow.append(float(item))\n returnvalue.addRow(*newRow)\n return returnvalue", "def _cast_to_float64(matrix):\n return matrix.astype(np.float64) if matrix.dtype != np.float64 else matrix", "def read_as_float_matrix(path):\n\n bgr_image = cv2.imread(path)\n return bgr_image.astype(np.float32) / 255.", "def getFileAsFloatMatrix(dirPath, columns, delim=\",\"):\n\tmat = list()\n\tfor rec in fileSelFieldsRecGen(dirPath, columns, delim):\n\t\tmat.append(asFloatList(rec))\n\treturn mat", "def as_matrix(self) -> types.Matrix:", "def write_float_array(f, path, values, dtype='f8'):\n dset = f.create_dataset(path, (len(values),), dtype=dtype)\n dset[:] = values\n f.flush()", "def test_convert_numpy_to_libffm(self):\n file = tempfile.NamedTemporaryFile(delete=False)\n\n # write data to libffm format\n write_data_to_xlearn_format(self.X, self.y, file.name, fields=self.fields)\n\n # read back data from file\n X_true, y_true, field_true = self._read_libffm_file(file.name)\n file.close()\n if os.path.exists(file.name):\n os.remove(file.name)\n\n assert np.all(np.isclose(self.X, X_true))\n assert np.all(self.y.ravel() == y_true.ravel())\n assert np.all(self.fields.ravel() == field_true.ravel())", "def floatArrayToPrt(float_array):\n\n util = OpenMaya.MScriptUtil() \n util.createFromList(float_array, len(float_array))\n\n return util.asFloatPtr()", "def new_float(*args, **kwargs):\n return array.array(FLOAT_TYPECODE, *args, **kwargs)", "def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def write_matrix_to_binary(file_name, val):\n with open(file_name, 'wb') as file:\n nrow = val.shape[0]\n ncol = val.shape[1]\n file.write(int32_to_bytes(nrow) + int32_to_bytes(ncol) + val.astype(float).tobytes(order='C'))", "def removeMatrixTranslate(matrix):\n\n float_matrix = [matrix(i, j) for i in xrange(4) for j in xrange(4)]\n for idx in range(12, 15):\n float_matrix[idx] = 0.0\n \n outMatrix = OpenMaya.MFloatMatrix()\n OpenMaya.MScriptUtil.createFloatMatrixFromList(float_matrix , outMatrix)\n\n return outMatrix", "def to_matrix(self, normalize: bool = True) -> jnp.ndarray:\n return NotImplemented # pragma: no cover", "def convertToFloatArray(booleanArray: typing.List[bool]) -> typing.List[float]:\n ...", "def readCSVasFloat(filename):\n returnArray = []\n lines = open(filename).readlines()\n for line in lines:\n line = line.strip().split(\",\")\n if len(line) > 0:\n returnArray.append(np.array([np.float32(x) for x in line]))\n\n returnArray = np.array(returnArray)\n return returnArray", "def cfloat(queue=None):\n return np.float32", "def read_float(filename):\n\tf = open(filename, \"r\")\n\tarr = np.fromfile(f, dtype='>f4')\n\treturn arr", "def convert_stream(self, stream):\n return np.fromstring(stream, \"Float32\")", "def get_float_data(dataframe):\n dataframe = dataframe[np.isfinite(dataframe.TIME_StartTime)]\n float_cols = [isfloatarray(col) for col in dataframe.values.T]\n return (dataframe.T[float_cols].T).astype(float)", "def column(matrix, i):\r\n return [row[i] for row in matrix if isfloat(row[i])]", "def np_floats(self):\n # A numpy-array packaging of the floating-point input parameters\n return np.array([self.chi1, self.chi2, 
self.chip, self.thetaJ, self.m1, self.m2,\n self.distance, self.alpha0, self.phic, self.fref], dtype=np.float64)", "def test_op_fillfrom_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_r = stream.empty_like(a)\n offl_r.fillfrom(a)\n r = offl_r.update_host().array\n stream.sync()\n self.assertTrue((a == r).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))", "def getFileAsFiltFloatMatrix(dirPath, filt, columns, delim=\",\"):\n\tmat = list()\n\tfor rec in fileFiltSelFieldsRecGen(dirPath, filt, columns, delim):\n\t\tmat.append(asFloatList(rec))\n\treturn mat", "def test_op_one_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == 1.0).all(),\n \"Array should be all one.\" + str(a))", "def __type_of_elements_correct_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int, float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int, float))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_convert_matrix():\n foo = Value(matrices=[[1.0, 2.0], [-2.0, 1.0]])\n assert foo.matrices[0][0][1].value == 2.0\n assert foo.matrices[0][1][0].value == -2.0", "def _numpy_2_native_matrix(numpy_mat):\n\n # Create native matrix object\n packed_vec = _pack_numpy_matrix(numpy_mat)\n return rqrmilib.create_matrix(packed_vec)", "def wrapDBMatrix(self,mat):\n return mat.todense()", "def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im", "def uint8_to_float(im: np.array):\n if im.dtype == np.float32:\n warnings.warn(\"Image is already np.float32\")\n return im\n im = im.astype(np.float32) / 255\n return im", "def getFloat(self):\n try:\n data, n = self._buf.pop(_F_LEN)\n \n if n != _F_LEN:\n raise SerializationError('There is not enough data left.')\n \n return _F_STRUCT.unpack(data)[0]\n except StructError as e:\n raise SerializationError('Data is not a valid float: '\n '{0}'.format(e))", "def f1(self) -> float:\n return self._matrix.f1", "def readFloat(self) -> float:\n return self._unpack('!f', 4)", "def read_input_float_feature(feature_map, key, shape):\n if shape is None:\n (dim_z, dim_y, dim_x) = feature_map.feature[key + '/dim'].int64_list.value\n else:\n (dim_z, dim_y, dim_x) = shape\n tensor = np.array(feature_map.feature[key].float_list.value[:]).reshape(\n dim_z, dim_y, dim_x)\n return tensor", "def getFileAsFloatColumn(dirPath):\n\tflist = list()\n\tfor rec in fileRecGen(dirPath, None):\n\t\tflist.append(float(rec))\n\treturn flist", "def import_matrix(fileMatrix):\n with open(fileMatrix) as fMat:\n matrix = np.zeros((3,4))\n for ligne in fMat:\n if ligne.startswith(' 1') or ligne.startswith(' 2') or ligne.startswith(' 3'):\n matrix[int(ligne.split()[0])-1,:] = 
float(ligne.split()[1]),float(ligne.split()[2]),float(ligne.split()[3]),float(ligne.split()[4])\n return deepcopy(matrix)", "def writeMatrix(self):\n\t\tpass", "def _float_metric_value(metric):\n return metric.result().numpy().astype(float)", "def parse_matrix(lines):\r\n col_headers = None\r\n result = []\r\n row_headers = []\r\n for line in lines:\r\n if line[0] == '#':\r\n continue\r\n if line[0] == '\\t': # is header\r\n col_headers = map(strip, line.split('\\t')[1:])\r\n else:\r\n entries = line.split('\\t')\r\n result.append(map(float, entries[1:]))\r\n row_headers.append(entries[0])\r\n return col_headers, row_headers, asarray(result)", "def read_matrix_from_binary(file_name):\n with open(file_name, 'rb') as file:\n buffer = file.read()\n n_row = int.from_bytes(buffer[0:4], 'little')\n n_col = int.from_bytes(buffer[4:8], 'little')\n matrix = numpy.frombuffer(buffer[8:], dtype=float).reshape([n_row, n_col])\n return matrix", "def py_make_float_array(cst, op_version=None):\n return numpy.array([cst], dtype=numpy.float32)", "def _serialize_double_matrix(m):\n if (type(m) == ndarray and m.dtype == float64 and m.ndim == 2):\n rows = m.shape[0]\n cols = m.shape[1]\n ba = bytearray(24 + 8 * rows * cols)\n header = ndarray(shape=[3], buffer=ba, dtype=\"int64\")\n header[0] = 2\n header[1] = rows\n header[2] = cols\n copyto(ndarray(shape=[rows, cols], buffer=ba, offset=24,\n dtype=\"float64\", order='C'), m)\n return ba\n else:\n raise TypeError(\"_serialize_double_matrix called on a \"\n \"non-double-matrix\")", "def _native_matrix_2_numpy(mat):\n\n if 'RQRMI matrix' not in str(mat):\n raise ValueError('Input is not valid rqrmi matrix object')\n return np.array(rqrmilib.matrix_to_list(mat))", "def tfidfTransform(matrix):\n matrix_float = numpy.zeros(matrix.shape)\n document_Total = len(matrix)\n rows, cols = matrix.shape\n for row in xrange(0, rows): # For each document\n wordTotal = reduce(lambda x, y: x + y, matrix[row])\n for col in xrange(0, cols): # For each term\n # For consistency ensure all self.matrix values are floats\n matrix[row][col] = float(matrix[row][col])\n if matrix[row][col] != 0:\n term_document_occurences = __getTermDocumentOccurences(col, matrix)\n term_frequency = matrix[row][col] / float(wordTotal)\n inverse_document_frequency = math.log(abs(document_Total / float(term_document_occurences)))\n # print(term_document_occurences, term_frequency, inverse_document_frequency, term_frequency *\n # inverse_document_frequency)\n matrix_float[row][col] = term_frequency * inverse_document_frequency\n # print(matrix_float[row][col])\n return matrix_float", "def getMultipleFileAsFloatMatrix(dirPathWithCol, delim=\",\"):\n\tmat = list()\n\tminLen = -1\n\tfor path, col in dirPathWithCol:\n\t\tcolVals = getFileColumnAsFloat(path, col, delim)\n\t\tif minLen < 0 or len(colVals) < minLen:\n\t\t\tminLen = len(colVals)\n\t\tmat.append(colVals)\n\t\n\t#make all same length\n\tmat = list(map(lambda li:li[:minLen], mat))\t\n\treturn mat", "def make_float(array):\n finial_array = []\n\n for number in array:\n finial_array.append(float(number))\n return finial_array", "def _serialize_matrix(matrix):\n\n f = BytesIO()\n spsp.save_npz(f, matrix)\n result = f.getvalue().hex()\n return result", "def __type_of_elements_incorrect_float_in_Numpy_vector(self):\n strTestName = 'Float elements in a Numpy vector (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy vector #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', 
(float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = np.random.randn(1e2)\n RxCSObject.parameter2 = np.random.randn(1e2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)", "def floatX(arr):\n return np.asarray(arr, dtype=theano.config.floatX)", "def binfloat(filename):\n return np.memmap(filename, dtype='float32')", "def read_float(self):\n return self._packers[\"f\"].unpack(self.read(4))[0]", "def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False", "def update_F_matrix(self, F_matrix):\n self.F_matrix = F_matrix", "def asfloat(value):\n float_type = 'float32'\n\n if isinstance(value, (np.matrix, np.ndarray)):\n if value.dtype != np.dtype(float_type):\n return value.astype(float_type)\n\n return value\n\n elif isinstance(value, (tf.Tensor, tf.SparseTensor)):\n return tf.cast(value, tf.float32)\n\n elif issparse(value):\n return value\n\n float_x_type = np.cast[float_type]\n return float_x_type(value)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs))\n _x = self.base.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_5f.pack(_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z))\n length = len(self.base.points)\n buff.write(_struct_I.pack(length))\n for val1 in self.base.points:\n _x = val1\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.z))\n length = len(self.base.intensity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(self.base.intensity.tostring())\n buff.write(_struct_b.pack(self.base.cost))\n _x = self.base.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2f.pack(_x.mean_height, _x.mean_intensity))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def test_message_float():\n result = True\n\n message = msg.Message()\n for i in range(num_it):\n message.appendFloat(i/128.789456)\n if message.length != msg.HEADER_SIZE + (i+1)*msg.floatStruct.size:\n print(\"Size is \", message.length, \" but should be \", msg.HEADER_SIZE + (i+1)*msg.floatStruct.size)\n print(\"Error : message.appendFloat\")\n result = False\n\n message.resetCursor()\n for i in range(num_it):\n r = message.readFloat()\n if abs(r - i/128.789456) > 0.000001:\n print(r, \" vs \", i/128.789456)\n print(\"Error : message.read/appendFloat\")\n result = False\n\n return result", "def readFloats(path, dimensions, header=0):\n size = reduce(operator.mul, dimensions)\n ra = RandomAccessFile(path, 'r')\n try:\n ra.skipBytes(header)\n bytes = zeros(size * 4, 'b')\n ra.read(bytes)\n floats = zeros(size, 'f')\n ByteBuffer.wrap(bytes).asFloatBuffer().get(floats)\n return ArrayImgs.floats(floats, dimensions)\n finally:\n ra.close()", "def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )", "def getFloatRow(self, int: int) -> typing.List[float]:\n ...", "def j_sparse_vector_wrapper_to_scipy_spmatrix(j_obj: JavaObject):\n 
indices = np.frombuffer(j_obj.getIndicesBytes(), dtype=\"<i4\")\n values = np.frombuffer(j_obj.getValuesBytes(), dtype=\"<f8\")\n size = j_obj.getSize()\n indptr = np.array([0, indices.shape[0]], dtype=np.int32)\n return csr_matrix((values, indices, indptr), shape=(1, size), dtype=np.float64).todok()", "def py2mat(myobj):\n if isinstance(myobj, pandas.Series):\n mat = r.matrix(myobj,\n rownames=myobj.index,\n dimnames=myobj.name)\n else:\n mat = r.matrix(myobj)\n return mat", "def read_float(stream, writer_schema=None, reader_schema=None): # noqa\n return unpack('<f', stream.read(4))[0]", "def fread_matrix(stream):\n\n A = fread_vector(stream)\n while (True):\n v = fread_vector(stream)\n if (v.shape[0] == 0):\n return (A)\n A = numpy.vstack((A, v))", "def _convert_matrix(m):\n\n return [m[0][0], m[0][1], m[0][2], m[0][3],\n m[2][0], m[2][1], m[2][2], m[2][3],\n -m[1][0], -m[1][1], -m[1][2], -m[1][3],\n m[3][0], m[3][1], m[3][2], m[3][3]]", "def decode_float(self, buf, pos):\n return self.decode_struct(self._float_fmt, buf, pos)", "def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f", "def __convert_two_cols(raw_array: list, num_rows: int):\n\n # no need to check extra row, we can go right into conversions\n for i in range(num_rows):\n\n # float conversion\n float_value = float(raw_array[i][1])\n raw_array[i][1] = float_value\n\n # no need to delete an extra entry,\n # we can just convert the existing string and assign it\n timestamp = pd.Timestamp(raw_array[i][0])\n raw_array[i][0] = timestamp\n\n return", "def strToFloatArray(line, delim=\",\"):\n\tarr = line.split(delim)\n\treturn [float(a) for a in arr]", "def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data", "def parse_xml_matrix(mat_element):\n rows = int(mat_element.find(\"rows\").text)\n cols = int(mat_element.find(\"cols\").text)\n type_flag = mat_element.find(\"dt\").text\n if type_flag == \"f\":\n dtype = np.float32\n elif type_flag == \"d\":\n dtype = np.float64\n else:\n raise ValueError(\"dtype flag \" + type_flag + \" not supported.\")\n data_string = mat_element.find(\"data\").text\n data = np.array([float(part) for part in data_string.strip().split(\" \") if len(part) > 0])\n return data.reshape((rows, cols)).astype(dtype)", "def solution(array):\n array1 = np.array(array)\n array2 = array1.astype(float)\n return array2", "def test_renderer_works_correctly_with_numpy_floating(self):\n data = numpy.float32(0.0)\n rendered = self.renderer.render(\n data=data, media_type=\"application/json\", renderer_context={}\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, data)", "def convertToFloat(boolean: bool) -> float:\n ...", "def transform_mat(matrix):\n delta = 1e-5\n matrix = matrix + delta\n return matrix", "def matrix2bytes(matrix):\n return bytes(sum(matrix, []))", "def _image_to_vector(image):\n return image.flatten().astype(float)", "def to_mat(self) -> np.matrix:\n raise NotImplementedError", "def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def process(self, mat):", "def _convert_data(self, data):\n if isinstance(data, np.ndarray):\n data = data.astype(float_save(), copy=False)\n if self.compress:\n return KaldiCompressedMatrix.compress(data, self.compression_method)\n return KaldiMatrix(data)\n\n if isinstance(data, 
KaldiMatrix):\n if self.compress:\n return KaldiCompressedMatrix.compress(data, self.compression_method)\n return data\n\n if isinstance(data, KaldiCompressedMatrix):\n if not self.compress:\n return data.to_matrix()\n return data\n\n raise ValueError(\"Data is not ndarray or KaldiMatrix\")", "def _decode_float(fp):\n return struct.unpack('>f', fp.read(4))[0]", "def _values_number(self) -> ndarray:\n if 'f' in self._data:\n arr_dtype = 'float64'\n else:\n arr_dtype = 'int64'\n\n col_num: int = 0\n for kind, arr in self._data.items():\n if kind in 'ifb':\n col_num += arr.shape[1]\n shape: Tuple[int, int] = (len(self), col_num)\n\n v: ndarray = np.empty(shape, dtype=arr_dtype, order='F')\n for i, (_, col_arr, dtype, _) in enumerate(self._col_info_iter(with_arr=True)):\n if dtype in 'ifb':\n v[:, i] = col_arr\n return v", "def test_op_add_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def ts_float32(val):\n return np.float64(val)", "def array_to_gf_array(data, m_gf = 1, floatprec = 64):\n data_binary = []\n for val in data:\n data_binary.append(binary_string_to_gf_elements(float_to_binary(val, floatprec), m_gf))\n return np.array(data_binary,dtype=int)", "def test_op_zero_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0.0,\n \"Array should be all zeros.\")", "def csr2tensor(self, matrix: sp.csr_matrix):\n matrix = matrix.tocoo()\n x = torch.sparse.FloatTensor(\n torch.LongTensor(np.array([matrix.row, matrix.col])),\n torch.FloatTensor(matrix.data.astype(np.float32)),\n matrix.shape,\n ).to(self.device)\n return x", "def to_float32(elem):\n return elem.astype(np.float32)", "def to_matrix(array):\n return Matrix(array.tolist())", "def PLCTYPE_ARR_REAL(n: int) -> Type[Array]:\n return c_float * n", "def save_float16_npy(data, path):\n np.save(path, data.astype(np.float16))", "def load_file(file_name) -> np.ndarray:\r\n reader = csv.reader(open(file_name, \"r\"), delimiter=',')\r\n x_rdr = list(reader)\r\n return np.array(x_rdr).astype('float')", "def _resolve_float(self, item):\n if not type(item) in (float, int, str, np.number) and not np.isscalar(item):\n if isinstance(item, np.ndarray) and item.shape == ():\n item = float(item)\n else:\n item = item[0]\n\n return item", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def as_matrix(self):\n return self._data", "def _fileToMatrix(file_name):\r\n # TODO: np.loadtxt() might be an alternative\r\n # try:\r\n if 1 < 3:\r\n lres = []\r\n for line in open(file_name, 'r').readlines():\r\n if len(line) > 0 and line[0] not in ('%', 
'#'):\r\n lres.append(list(map(float, line.split())))\r\n res = lres\r\n else:\r\n fil = open(file_name, 'r')\r\n fil.readline() # rudimentary, assume one comment line\r\n lineToRow = lambda line: list(map(float, line.split()))\r\n res = list(map(lineToRow, fil.readlines()))\r\n fil.close() # close file could be omitted, reference counting should do during garbage collection, but...\r\n\r\n while res != [] and res[0] == []: # remove further leading empty lines\r\n del res[0]\r\n return res\r\n # except:\r\n print('could not read file ' + file_name)", "def test_float_log(self):\n htype = h5t.py_create('f', logical=True)\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def _frz(a):\n if a.ndim == 0:\n a.shape = (1,)\n return a" ]
[ "0.7274915", "0.66002345", "0.6478091", "0.62763405", "0.59943956", "0.58819616", "0.58508307", "0.58382297", "0.58107084", "0.5799", "0.57688916", "0.57163143", "0.5654517", "0.56448877", "0.56354064", "0.5616841", "0.5608916", "0.55774224", "0.5566227", "0.5564168", "0.5546812", "0.5541829", "0.5540548", "0.55202895", "0.5496594", "0.54946804", "0.5492568", "0.54916364", "0.547437", "0.54563665", "0.54563665", "0.54470193", "0.54364645", "0.5393177", "0.53895694", "0.5387334", "0.5363123", "0.53520215", "0.53430223", "0.53384435", "0.5334494", "0.53341305", "0.53245634", "0.53146183", "0.5285846", "0.5273028", "0.52642286", "0.5238008", "0.523765", "0.5222317", "0.5219979", "0.52159363", "0.5202647", "0.51958007", "0.51954144", "0.5186917", "0.5186651", "0.51865774", "0.5182645", "0.5175491", "0.5170876", "0.5157403", "0.51572406", "0.5155768", "0.5154924", "0.51546043", "0.51434004", "0.5143276", "0.5138756", "0.51293", "0.51280993", "0.5124409", "0.51206964", "0.5115387", "0.51124156", "0.5104317", "0.50838053", "0.50823843", "0.5079624", "0.50768137", "0.5067845", "0.50668705", "0.5057508", "0.5054954", "0.50480044", "0.5047006", "0.5043665", "0.5042352", "0.50407255", "0.5035432", "0.5033099", "0.5026417", "0.50236076", "0.50227505", "0.5020943", "0.5014699", "0.501064", "0.5004021", "0.500301", "0.5002116" ]
0.7982614
0
Set the parameters for the TensorCoFi
def setParams(self,dim=20, nIter=5, lamb=0.05, alph=40):
        self._dim = dim
        self._nIter = nIter
        self._lamb = lamb
        self._alph = alph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def set_hyperparams(self, params):", "def set_parameters(self, new_param):\n\n current_idx = 0\n for idx, param in enumerate(self.__network.parameters()):\n temp_param = \\\n new_param[current_idx:current_idx + self.__net_sizes[idx]]\n temp_param = temp_param.reshape(self.__net_shapes[idx])\n param.data = tr.from_numpy(temp_param).float()\n current_idx += self.__net_sizes[idx]", "def set_params(self, params):\n cpt = 0\n for param in self.parameters():\n tmp = np.product(param.size())\n\n if torch.cuda.is_available():\n param.data.copy_(torch.from_numpy(\n params[cpt:cpt + tmp]).view(param.size()).cuda())\n else:\n param.data.copy_(torch.from_numpy(\n params[cpt:cpt + tmp]).view(param.size()))\n cpt += tmp", "def set_params(self):\n raise NotImplementedError", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def set_params(self, new_params: torch.Tensor) -> None:\n assert new_params.size() == self.get_params().size()\n progress = 0\n for pp in list(self.net.parameters()):\n cand_params = new_params[progress: progress +\n torch.tensor(pp.size()).prod()].view(pp.size())\n progress += torch.tensor(pp.size()).prod()\n pp.data = cand_params", "def _set_controller_parameters(self, 
P=None, I=None, D=None):\n pass", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def set_params(self):\r\n pass", "def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise", "def setup_parameters(self):\n structure = self.ctx.structure_initial_primitive\n ecutwfc = []\n ecutrho = []\n\n for kind in structure.get_kind_names():\n try:\n dual = self.ctx.protocol['pseudo_data'][kind]['dual']\n cutoff = self.ctx.protocol['pseudo_data'][kind]['cutoff']\n cutrho = dual * cutoff\n ecutwfc.append(cutoff)\n ecutrho.append(cutrho)\n except KeyError as exception:\n self.abort_nowait('failed to retrieve the cutoff or dual factor for {}'.format(kind))\n\n natoms = len(structure.sites)\n conv_thr = self.ctx.protocol['convergence_threshold'] * natoms\n\n self.ctx.inputs['parameters'] = {\n 'CONTROL': {\n 'restart_mode': 'from_scratch',\n 'tstress': self.ctx.protocol['tstress'],\n },\n 'SYSTEM': {\n 'ecutwfc': max(ecutwfc),\n 'ecutrho': max(ecutrho),\n 'smearing': self.ctx.protocol['smearing'],\n 'degauss': self.ctx.protocol['degauss'],\n 'occupations': self.ctx.protocol['occupations'],\n },\n 'ELECTRONS': {\n 'conv_thr': conv_thr,\n }\n }", "def initialize_parameters():\n\n W1 = tf.get_variable('W1', [3,3,3,64], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W2 = tf.get_variable('W2', [3,3,64,128], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W3 = tf.get_variable('W3', [3,3,128,256], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W4 = tf.get_variable('W4', [3,3,256,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W5 = tf.get_variable('W5', [3,3,512,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"W2\": W2,\n \"W3\": W3,\n \"W4\": W4,\n \"W5\": W5\n }\n\n return parameters", "def setParams(self, tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n # Further conanonicalization, e.g. converting dict to sorted str pairs happens here\n return self._set(**kwargs)", "def __init__(self, graph=None, *args, **kwargs):\n # Set TensorFlow graph. 
All TF code will work on this graph.\n self.graph = graph or tf.Graph()\n self.SetParams(*args, **kwargs)", "def __init__(self, tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n self.setParams(**kwargs)", "def set_parameters(self, We1,be1, We2, be2, We3, be3, Wmu, bmu, Wstd, bstd, Wd1, bd1, Wd2, bd2, Wd3, bd3):\r\n self.en_fc1.weight=nn.Parameter(We1)\r\n self.en_fc1.bias=nn.Parameter(be1)\r\n \r\n self.en_fc2.weight=nn.Parameter(We2)\r\n self.en_fc2.bias=nn.Parameter(be2)\r\n \r\n self.en_fc3.weight=nn.Parameter(We3)\r\n self.en_fc3.bias=nn.Parameter(be3)\r\n \r\n self.en_mu.weight=nn.Parameter(Wmu)\r\n self.en_mu.bias=nn.Parameter(bmu)\r\n \r\n self.en_log.weight=nn.Parameter(Wstd)\r\n self.en_log.bias=nn.Parameter(bstd)\r\n \r\n self.de_fc1.weight=nn.Parameter(Wd1)\r\n self.de_fc1.bias=nn.Parameter(bd1)\r\n \r\n self.de_fc2.weight=nn.Parameter(Wd2)\r\n self.de_fc2.bias=nn.Parameter(bd2)\r\n \r\n self.de_fc3.weight=nn.Parameter(Wd3)\r\n self.de_fc3.bias=nn.Parameter(bd3)\r\n \r\n return", "def _set_params(self,x):\r\n self.k._set_params(x)", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def set_params(self, **kwargs):\n\n # We don't want non-functional arguments polluting kwargs\n params = kwargs.copy()\n for k in ['function', 'target']:\n params.pop(k, None)\n\n self.kwargs.update(params)\n BaseEstimator.set_params(self, **kwargs)", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = 
(self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def set_params(self, params):", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def set_params(self, **params):\n return self.forest.set_params(**params)", "def set_parameters(self, params):\n self.kp = params.pgain", "def set_params(self, **kwargs):\n ...", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def set_tf_params(model, write_graph=False):\n\n with tf.gfile.FastGFile(os.path.join(\n MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n if write_graph:\n # Write graph to file so that it can be visualized using TensorBoard\n summary_writer = tf.summary.FileWriter('data', graph=graph_def)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # Do not allocale the whole GPU mem\n with tf.Session(config=config) as sess:\n copy_inception(sess, model)", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def 
set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def setParameters(self, params):\n self.module._setParameters(params)\n # update parameters for learner\n self.learner.setModule(self.module)", "def set_parameters(self, create_models=True, **parameters):\n flag_nn_opti = False\n\n # Set attributes\n for param, value in parameters.items():\n if param in self.DEFAULT_VALUES.keys():\n if getattr(self, param) != value:\n # We change param value\n setattr(self, param, value)\n if param in ['hidden_layers', 'lr']:\n flag_nn_opti = True\n\n else:\n raise 
Exception(f'Parameter {param} not known.')\n\n # Create torch instances\n if create_models and flag_nn_opti:\n self._create_networks_and_optimizer()", "def define_parameters(act_func, dropout, fc_layer_units, labels, samples):\n\n\n\n # Define a training and test set\n test_size = 0.1 # training is set on 90%\n training_vec, test_vec, training_labels, test_labels = train_test_split(samples, labels, test_size=test_size)\n\n # Get the batch size\n batch_percentage = 0.1 # There is chosen to use a batch size of 10%\n batch_size = int(training_vec.shape[0] * batch_percentage)\n\n # Get the number of features\n feature_number = training_vec.shape[1]\n\n # Get the number of classes\n class_number = len(np.unique(labels))\n\n # Get the layer nodes in correct format\n int_layer_units = []\n units = fc_layer_units.split(',')\n for unit in units:\n int_layer_units.append(int(unit))\n\n # Get the dropout layers in correct format\n dropout_booleans = []\n dropouts = dropout.split(',')\n for layer in dropouts:\n layer = layer.lower()\n if layer == 'f' or layer == 'false':\n dropout_booleans.append(False)\n else:\n dropout_booleans.append(True)\n\n # Get the layer names of the neural network architecture\n layers = []\n for index, nodes in enumerate(int_layer_units):\n layers.append('fc ({})'.format(nodes))\n if dropout_booleans[index]:\n layers.append('do')\n layers = ' - '.join(layers)\n\n # Get the right activation function\n act_func = act_func.lower()\n if act_func == 'sigmoid' or act_func == 'sig' or act_func == 's':\n act_func = tf.nn.sigmoid\n act_title = 'sigmoid'\n elif act_func == 'relu' or act_func == 'r':\n act_func = tf.nn.relu\n act_title = 'ReLU'\n elif act_func == 'tanh' or act_func == 'tan' or act_func == 't':\n act_func = tf.tanh\n act_title = 'tanH'\n else:\n act_func = None\n act_title = 'none'\n\n return act_func, act_title, batch_size, class_number, feature_number, layers, dropout_booleans, int_layer_units, \\\n test_labels, test_vec, training_labels, training_vec", "def set_tunable_params(self, params):\n self._tunable_params = params", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def set_params(self, **params):\n self.check_params(params)\n self.sk_params.update(params)\n return self", "def __call__(self, params):\n logging.info('Running __call__ function...')\n batch_size = self._train_batch_size\n # For MCTS, the number of features for each trajecotry is unknown beforehand\n num_features = None\n\n if self._global_step_value % self._iterations_per_loop == 0:\n logging.info('Update iterator (gs=%d)...', self._global_step_value)\n # Feature/Labels Placeholders\n self.features_ph = {\n 'mcts_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='mcts_state_ph'),\n 'policy_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='policy_state_ph'),\n }\n self.labels_ph = {\n 'action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='action_ph'),\n 'value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='value_ph'),\n 'return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='return_ph'),\n 'old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], 
name='old_neg'),\n 'mean_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='mean_ph'),\n 'logstd_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='logstd_ph'),\n 'mcts_enable_tensor':\n tf.placeholder(\n tf.bool, shape=[num_features], name='mcts_enable_ph'),\n 'policy_action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='policy_action_ph'),\n 'policy_value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_value_ph'),\n 'policy_return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_return_ph'),\n 'policy_old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_old_neg'),\n }\n # Create the dataset\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.features_ph, self.labels_ph))\n dataset = dataset.shuffle(buffer_size=self._max_horizon)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # repeat until the loop is done\n dataset = dataset.repeat()\n if self._use_tpu:\n dataset = dataset.map(functools.partial(self._set_shapes, batch_size))\n dataset = dataset.prefetch(2)\n self._iterator = dataset.make_initializable_iterator()\n return self._iterator.get_next()\n else:\n return self._iterator.get_next()", "def update(self, parameters):\n self.set_frequencies(parameters) # f_i\n self.set_coupling_weights(parameters) # w_ij\n self.set_phase_bias(parameters) # theta_i\n self.set_amplitudes_rate(parameters) # a_i\n self.set_nominal_amplitudes(parameters) # R_i", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def set_profile_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]\n self.M500 = params[5]\n self.r500 = params[6]\n self.z = params[7]", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = 
self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def set_params(model, params): # type: ignore\n for p, p_new in zip(model.parameters(), params):\n p.data = p_new.data", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def set_tunable_parameters(self, n_latent_features=1024, reduced_size=64, input_type='default', **kwargs):\n\n self.logger.info(\"Set parameters: n_latent_features=%s, reduced_size=%s, input_type=%s\", n_latent_features,\n reduced_size, input_type)\n\n self.n_latent_features = n_latent_features\n self.reduced_size = reduced_size\n self.input_type = input_type\n\n self._construct_model()", "def transfer_parameters_call_optimization(self, mainCl, mainPt, consCall=None, consPt=None):", "def set_parameter(self, output):\n self.model.set_parameter(output);", "def define_parameters(self):", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def test_set_params_2():\n tpot_obj = TPOTClassifier(generations=2)\n tpot_obj.set_params(generations=3)\n\n assert tpot_obj.generations == 3", "def doParametersOfInterest(self):\n\n 
self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p", "def _setup_misc(self, mode):\n self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False)\n self.reuse = None if (mode == 'train') else True\n self.batch_size = self.hparams.batch_size\n if mode == 'eval':\n self.batch_size = 25", "def set_params(self, **params):\n if not hasattr(self, \"_non_sklearn_base\"):\n return super().set_params(**params)\n if not (\n len(params) == 1 and\n (\"nthreads\" in params or \"n_jobs\" in params)\n ):\n self.is_fitted_ = False\n valid_params = self.get_params(deep=False)\n for k,v in params.items():\n if k not in valid_params:\n raise ValueError(\"Invalid parameter: \", k)\n setattr(self, k, v)\n return self", "def __init__(self, encut, ldaul, Uparam, Jparam, name=\"DFTU_settings\"):\n\n dftu_settings = {\"LDAU\": \".TRUE.\" , \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LADAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def set_default_params(self, opt):\n self.config.embed_dim = opt.embed_dim or 200\n self.config.rnn_size = opt.rnn_size or 512\n self.config.nrnn_layer = opt.nrnn_layer or 2\n self.config.rnn_dropout = opt.rnn_dropout or 0.5\n self.config.rnnout_dim = 2 * self.config.rnn_size * self.config.nrnn_layer\n ## MULTIMODAL (ATTENTION)\n self.config.cnnout_dim = opt.cnnout_dim or 512\n self.config.cnnout_w = opt.cnnout_w or 14\n self.config.cnnout_h = opt.cnnout_h or 14\n self.config.cnnout_spat = self.config.cnnout_w * self.config.cnnout_h\n self.config.multfeat_dim = opt.multfeat_dim or 512\n self.config.attfeat_dim = opt.attfeat_dim or 256\n self.config.netout_dim = opt.answer_size\n ## [attlstm] in: {2*multfeat_dim, att_rnn_s_dim} {att_rnn_size, att_rnn_s_dim}\n self.config.att_rnn_size = opt.att_rnn_size or 512\n self.config.att_rnn_nlayer = opt.att_rnn_nlayer or 1\n self.config.att_rnn_dropout = opt.att_rnn_dropout or 0.0\n # TODO: There could be a protential bugs if self.config.att_rnn_nlayer > 1\n assert(self.config.att_rnn_nlayer == 1)\n self.config.att_rnn_s_dim = self.config.att_rnn_size * self.config.att_rnn_nlayer\n\n # optimization\n self.config.max_grad_norm = opt.max_grad_norm or 0.1\n 
self.config.initializer_scale = 0.008", "def __init__(self, train_x, train_y, test_x, test_y, Tunning_Cs=[0.001, 0.01, 0.1, 1, 10]): \n self.Cs = Tunning_Cs\n self.train_x = train_x\n self.train_y = train_y\n self.test_x = test_x \n self.test_y = test_y\n self.model = svm.SVR(kernel='rbf', gamma='auto')", "def __init__(self, encut, name=\"scf_settings\"):\n InputParameters.__init__(self, name=name)\n self.update_electronic_settings(\"ENCUT\", encut)", "def tuned_for_ec():\n # TODO(theosanderson): update these to true SOTA values\n hparams = contrib_training.HParams()\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('batch_size', 34)\n hparams.add_hparam('dilation_rate', 5)\n hparams.add_hparam('filters', 411)\n hparams.add_hparam('first_dilated_layer', 1) # This is 0-indexed\n hparams.add_hparam('kernel_size', 7)\n hparams.add_hparam('num_layers', 5)\n hparams.add_hparam('pooling', 'mean')\n hparams.add_hparam('resnet_bottleneck_factor', 0.88152)\n hparams.add_hparam('lr_decay_rate', 0.9977)\n hparams.add_hparam('learning_rate', 0.00028748)\n hparams.add_hparam('decision_threshold', 0.3746)\n hparams.add_hparam('denominator_power', 0.88)\n\n hparams.add_hparam('train_steps', 650000)\n return hparams", "def set_fe_params(self, fe_params):\n self._params[0:self.k_fe] = fe_params", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def _set_params(self, estimator_args, scaler_args, execution_args, metric_args=None, dim_reduction_args=None):\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.model.overwrite = True\n self.model.debug = False\n self.model.test_size = 0.33\n self.model.cv = 0\n self.model.time_series_split = 0\n self.model.max_train_size = None\n self.model.random_state = 42\n self.model.compress = 3\n self.model.retain_data = False\n self.model.scale_hashed = True\n self.model.scale_vectors = True\n self.model.scaler = \"StandardScaler\"\n self.model.scaler_kwargs = {}\n self.model.estimator_kwargs = {}\n self.model.missing = \"zeros\"\n self.model.calc_feature_importances = False\n self.model.importances_n_repeats = 30\n self.model.lags= None\n self.model.lag_target = False\n self.model.scale_target = False\n self.model.scale_lag_target= True\n self.model.make_stationary = None\n self.model.stationarity_lags = [1]\n self.model.using_keras = False\n self.model.current_sample_as_input = True\n self.model.prediction_periods = 1\n \n # Default metric parameters:\n if metric_args is None:\n self.model.metric_args = {}\n \n # Set execution parameters\n \n # If the execution key word arguments were included in the request, get the parameters and values\n if len(execution_args) > 0:\n \n # Transform 
the string of arguments into a dictionary\n execution_args = utils.get_kwargs(execution_args)\n \n # Set the overwite parameter if any existing model with the specified name should be overwritten\n if 'overwrite' in execution_args:\n self.model.overwrite = 'true' == execution_args['overwrite'].lower()\n \n # Set the test_size parameter that will be used to split the samples into training and testing data sets\n # Default value is 0.33, i.e. we use 66% of the samples for training and 33% for testing\n if 'test_size' in execution_args:\n self.model.test_size = utils.atof(execution_args['test_size'])\n\n # Enable K-fold cross validation. For more information see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n # Default value is 0 in which case a simple holdout strategy based on the test_size parameter is used.\n # If cv > 0 then the model is validated used K = cv folds and the test_size parameter is ignored.\n if 'cv' in execution_args:\n self.model.cv = utils.atoi(execution_args['cv'])\n \n # Enable timeseries backtesting using TimeSeriesSplit. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html\n # This will select the a validation strategy appropriate for time series and sequential data.\n # The feature definitions must include an 'identifier' field which can be used to sort the series into the correct order.\n # The integer supplied in this parameter will split the data into the given number of subsets for training and testing.\n if 'time_series_split' in execution_args:\n self.model.time_series_split = utils.atoi(execution_args['time_series_split'])\n\n # This parameter can be used together with time_series_split.\n # It specifies the maximum samples to be used for training in each split, which allows for rolling/ walk forward validation.\n if 'max_train_size' in execution_args:\n self.model.max_train_size = utils.atoi(execution_args['max_train_size'])\n\n # Add lag observations to the feature matrix. Only applicable for Keras models.\n # An identifier field must be included in the feature definitions to correctly sort the data for this capability.\n # For e.g. if lags=2, features from the previous two samples will be concatenated as input features for the current sample.\n # This is useful for framing timeseries and sequence prediction problems into 3D or 4D data required for deep learning.\n if 'lags' in execution_args:\n self.model.lags = utils.atoi(execution_args['lags'])\n\n # Include targets in the lag observations\n # If True an additional feature will be created for each sample using the previous value of y \n if 'lag_target' in execution_args:\n self.model.lag_target = 'true' == execution_args['lag_target'].lower()\n \n # Scale the target before fitting\n # The scaling will be inversed before predictions so they are returned in the original scale \n if 'scale_target' in execution_args:\n self.model.scale_target = 'true' == execution_args['scale_target'].lower()\n\n # Scale lag values of the targets before fitting\n # Even if scale_target is set to false, the lag values of targets being used as features can be scaled by setting this to true \n if 'scale_lag_target' in execution_args:\n self.model.scale_lag_target = 'true' == execution_args['scale_lag_target'].lower()\n\n # Make the target series more stationary. 
This only applies to sequence prediction problems.\n # Valid values are 'log' in which case we apply a logarithm to the target values,\n # or 'difference' in which case we transform the targets into variance from the previous value.\n # The transformation will be reversed before returning predictions.\n if 'make_stationary' in execution_args:\n self.model.make_stationary = execution_args['make_stationary'].lower()\n\n # Provide lags periods for differencing\n # By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a list.\n # e.g. 'stationarity_lags=1;12|list|int'\n if 'stationarity_lags' in execution_args:\n self.model.stationarity_lags = utils.get_kwargs_by_type({'stationarity_lags': execution_args['stationarity_lags']})['stationarity_lags']\n\n # Specify if the current sample should be used as input to the model\n # This is to allow for models that only use lag observations to make future predictions\n if 'current_sample_as_input' in execution_args:\n self.model.current_sample_as_input = 'true' == execution_args['current_sample_as_input'].lower()\n\n # Specify the number of predictions expected from the model\n # This can be used to get a model to predict the next m periods given inputs for the previous n periods.\n # This is only valid for Keras models which have a final output layer with more than one node\n if 'prediction_periods' in execution_args:\n self.model.prediction_periods = utils.atoi(execution_args['prediction_periods'])\n \n # Seed used by the random number generator when generating the training testing split\n if 'random_state' in execution_args:\n self.model.random_state = utils.atoi(execution_args['random_state'])\n \n # Compression level between 1-9 used by joblib when saving the model\n if 'compress' in execution_args:\n self.model.compress = utils.atoi(execution_args['compress'])\n \n # Flag to determine if the training and test data should be saved in the model\n if 'retain_data' in execution_args:\n self.model.retain_data = 'true' == execution_args['retain_data'].lower()\n\n # Flag to determine if feature importances should be calculated when the fit method is called\n if 'calculate_importances' in execution_args:\n self.model.calc_feature_importances = 'true' == execution_args['calculate_importances'].lower()\n\n # Sets the number of times a feature is randomly shuffled during the feature importance calculation\n if 'importances_n_repeats' in execution_args:\n self.model.importances_n_repeats = utils.atoi(execution_args['importances_n_repeats'])\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in execution_args:\n self.model.debug = 'true' == execution_args['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n # Create dictionary of parameters to display for debug\n self.exec_params = {\"overwrite\":self.model.overwrite, \"test_size\":self.model.test_size, \"cv\":self.model.cv,\\\n \"time_series_split\": self.model.time_series_split, \"max_train_size\":self.model.max_train_size, \"lags\":self.model.lags,\\\n \"lag_target\":self.model.lag_target, \"scale_target\":self.model.scale_target, \"make_stationary\":self.model.make_stationary,\\\n \"random_state\":self.model.random_state, \"compress\":self.model.compress, \"retain_data\":self.model.retain_data,\\\n \"calculate_importances\": self.model.calc_feature_importances, \"importances_n_repeats\": self.model.importances_n_repeats,\\\n \"debug\":self.model.debug}\n\n self._print_log(1)\n \n # If the scaler key word arguments were included in the request, get the parameters and values\n if len(scaler_args) > 0:\n \n # Transform the string of arguments into a dictionary\n scaler_args = utils.get_kwargs(scaler_args)\n \n # Set scaler arguments that will be used when preprocessing the data\n # Valid values are: StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler and QuantileTransformer\n # More information here: http://scikit-learn.org/stable/modules/preprocessing.html\n if 'scaler' in scaler_args:\n self.model.scaler = scaler_args.pop('scaler')\n \n if 'missing' in scaler_args:\n self.model.missing = scaler_args.pop('missing').lower()\n \n if 'scale_hashed' in scaler_args:\n self.model.scale_hashed = 'true' == scaler_args.pop('scale_hashed').lower()\n \n if 'scale_vectors' in scaler_args:\n self.model.scale_vectors = 'true' == scaler_args.pop('scale_vectors').lower()\n \n # Get the rest of the scaler parameters, converting values to the correct data type\n self.model.scaler_kwargs = utils.get_kwargs_by_type(scaler_args) \n else:\n err = \"Arguments for scaling did not include the scaler name e.g StandardScaler\"\n raise Exception(err)\n \n # If the estimator key word arguments were included in the request, get the parameters and values\n if len(estimator_args) > 0:\n \n # Transform the string of arguments into a dictionary\n estimator_args = utils.get_kwargs(estimator_args)\n \n # Set estimator arguments that will be used when preprocessing the data\n # The parameters available will depend on the selected estimator\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'estimator' in estimator_args:\n self.model.estimator = estimator_args.pop('estimator')\n \n # Set the estimator type for the model\n if self.model.estimator in self.classifiers:\n self.model.estimator_type = \"classifier\"\n elif self.model.estimator in self.regressors:\n self.model.estimator_type = \"regressor\"\n elif self.model.estimator in self.decomposers:\n self.model.estimator_type = \"decomposer\"\n elif self.model.estimator in self.clusterers:\n self.model.estimator_type = \"clusterer\"\n else:\n err = \"Unknown estimator class: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n # Get the rest of the estimator parameters, converting values to the correct data type\n self.model.estimator_kwargs = utils.get_kwargs_by_type(estimator_args) \n else:\n err = \"Arguments for estimator did not include the estimator class e.g. 
RandomForestClassifier\"\n raise Exception(err)\n \n # If key word arguments for model evaluation metrics are included in the request, get the parameters and values\n if metric_args is not None and len(metric_args) > 0:\n # Transform the string of arguments into a dictionary\n metric_args = utils.get_kwargs(metric_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.metric_args = utils.get_kwargs_by_type(metric_args) \n \n # If key word arguments for dimensionality reduction are included in the request, get the parameters and values\n if dim_reduction_args is not None and len(dim_reduction_args) > 0:\n # Transform the string of arguments into a dictionary\n dim_reduction_args = utils.get_kwargs(dim_reduction_args)\n \n # Set dim_reduction arguments that will be used after preprocessing the data\n # The parameters available will depend on the selected dimensionality reduction method\n # Acceptable classes are PCA, KernelPCA, IncrementalPCA, TruncatedSVD\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'reduction' in dim_reduction_args:\n self.model.reduction = dim_reduction_args.pop('reduction')\n \n # Get the rest of the dim_reduction parameters, converting values to the correct data type\n self.model.dim_reduction_args = utils.get_kwargs_by_type(dim_reduction_args) \n else:\n err = \"Arguments for dimensionality reduction did not include the class e.g. PCA\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(2)", "def define_parameters(all_features, act_func, dropout, fc_layer_units, training_samples, batch_perc):\n\n # Get the variables in the right format\n all_features = all_features.lower()\n if all_features == 'y' or all_features == 'yes':\n all_features = \"all conservation-based scores\"\n else:\n all_features = \"only PhastCon primate scores\"\n\n # Get the batch size\n batch_size = int(training_samples.shape[0] * batch_perc)\n\n # Get the layer nodes in correct format\n int_layer_units = []\n units = fc_layer_units.split(',')\n for unit in units:\n int_layer_units.append(int(unit))\n\n # Get the dropout layers in correct format\n dropout_booleans = []\n dropouts = dropout.split(',')\n for layer in dropouts:\n layer = layer.lower()\n if layer == 'f' or layer == 'false':\n dropout_booleans.append(False)\n else:\n dropout_booleans.append(True)\n\n # Get the layer names of the neural network architecture\n layers = []\n for index, nodes in enumerate(int_layer_units):\n layers.append('fc ({})'.format(nodes))\n if dropout_booleans[index]:\n layers.append('do')\n layers = ' - '.join(layers)\n\n # Get the right activation function\n act_func = act_func.lower()\n if act_func == 'sigmoid' or act_func == 'sig' or act_func == 's':\n act_func = tf.nn.sigmoid\n act_title = 'sigmoid'\n elif act_func == 'relu' or act_func == 'r':\n act_func = tf.nn.relu\n act_title = 'ReLU'\n elif act_func == 'tanh' or act_func == 'tan' or act_func == 't':\n act_func = tf.tanh\n act_title = 'tanH'\n else:\n act_func = None\n act_title = 'none'\n\n return all_features, act_func, act_title, batch_size, layers, dropout_booleans, int_layer_units", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n 
self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def setParams(self, paramSet):\r\n pass", "def _config_set(self):\n p = self._params\n self._config = tf.estimator.RunConfig(save_checkpoints_steps = p.save_checkpoints_steps,\n keep_checkpoint_max = p.keep_checkpoint_max,\n save_summary_steps = p.save_summary_steps\n )", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan", "def set_model_parameters(self, cluster, model):\n\n\t\tmodel.maxnum=int(round(len(cluster)/(self.read_depth*0.9)))\n\t\tmodel.minnum=int(round(len(cluster)/(self.read_depth*1.1)))\n\t\tmodel.minsize=int(round(len(cluster)/(self.read_depth*0.9)))\n\t\tmodel.expcov=int(self.read_depth)\n\t\tmodel.maxcopy = self.max_copy", "def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]", "def set_load_model_parameters(self):\n\n self.controller.set_new_model_test_input_path(self.test_input.get())\n self.controller.set_new_model_results_input_path(self.results_input.get())\n self.controller.set_new_model_running(False)", "def setParams(self, inputCol=None, outputCol=None, graph=None,\n inputTensor=utils.IMAGE_INPUT_PLACEHOLDER_NAME, outputTensor=None,\n outputMode=\"vector\"):\n kwargs = self._input_kwargs\n return self._set(**kwargs)", "def _set_params_initializer(self, hparams, mode, scope):\n\t\tself.mode = mode\n\t\t# pre_train flag is used for distinguish with pre_train and fine tune\n\t\tif hparams.enable_vae:\n\t\t\t_info('Enable VAE')\n\t\t\tself.enable_vae = True\n\t\t\tself.pre_train = hparams.pre_train\n\t\telse:\n\t\t\tself.enable_vae = False\n\t\t\tself.pre_train = False\n\t\tself.dtype = tf.float32\n\t\tself.global_step = tf.Variable(0, trainable=False)\n\n\t\t# define the input for the model\n\t\tself.encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='encoder_input_data')\n\t\tself.decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_input_data')\n\t\tself.decoder_output_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_output_data')\n\t\tself.seq_length_encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], name='seq_length_encoder_input_data')\n\t\tself.seq_length_decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], name='seq_length_decoder_input_data')\n\t\t\n\t\t# load some important hparamters\n\t\tself.unit_type = hparams.unit_type\n\t\tself.num_units = hparams.num_units\n\t\tself.num_encoder_layers = hparams.num_encoder_layers\n\t\tself.num_decoder_layers = 
hparams.num_decoder_layers\n\t\tself.num_encoder_residual_layers = self.num_encoder_layers - 1\n\t\tself.num_decoder_residual_layers = self.num_decoder_layers - 1\n\n\t\tself.batch_size = tf.size(self.seq_length_encoder_input_data)\n\n\t\t# set initializer\n\t\trandom_seed = hparams.random_seed\n\t\tinitializer = _mh.get_initializer(hparams.init_op, random_seed, hparams.init_weight)\n\t\ttf.get_variable_scope().set_initializer(initializer)\n\n\t\t# embeddings\n\t\tself.src_vocab_size = hparams.src_vocab_size\n\t\tself.tgt_vocab_size = hparams.tgt_vocab_size\n\t\tself.init_embeddings(hparams, scope)", "def test_set_params():\n\n tpot_obj = TPOTClassifier()\n assert tpot_obj.set_params() is tpot_obj", "def set_parameters(self, weight, bias=None):\n self.weight = torch.nn.Parameter(torch.as_tensor(weight))\n if bias is not None:\n self.bias = torch.nn.Parameter(torch.as_tensor(bias))", "def set_parameters(self, weight, bias=None):\r\n self.weight = torch.nn.Parameter(torch.as_tensor(weight))\r\n if bias is not None:\r\n self.bias = torch.nn.Parameter(torch.as_tensor(bias))", "def __init__(self, data, params):\n super(VBPR, self).__init__(data, params)\n self.embed_k = self.params.embed_k\n self.embed_d = self.params.embed_d\n self.learning_rate = self.params.lr\n\n self.directory_parameters = f'batch_{self.params.batch_size}' \\\n f'-D_{self.params.embed_d}' \\\n f'-K_{self.params.embed_k}' \\\n f'-lr_{self.params.lr}' \\\n f'-reg_{self.params.reg}'\n\n self.process_cnn_visual_features()\n\n # Initialize Model Parameters\n self.Bp = tf.Variable(\n self.initializer(shape=[self.dim_cnn_features, 1]), name='Bp', dtype=tf.float32)\n self.Tu = tf.Variable(\n self.initializer(shape=[self.num_users, self.embed_d]),\n name='Tu', dtype=tf.float32) # (users, low_embedding_size)\n self.F = tf.Variable(\n self.cnn_features,\n name='F', dtype=tf.float32, trainable=False)\n self.E = tf.Variable(\n self.initializer(shape=[self.dim_cnn_features, self.embed_d]),\n name='E', dtype=tf.float32) # (items, low_embedding_size)\n\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)\n self.saver_ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self)", "def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def copy_params(self):\n tf.get_default_session().run(self.copy_ops)", "def set_parameters(self, f_ext=0):\n self.F_EXT = f_ext" ]
[ "0.7162809", "0.6975917", "0.69572663", "0.69175005", "0.67739886", "0.6729018", "0.67094076", "0.66425633", "0.6621363", "0.65588087", "0.6556437", "0.6530796", "0.6530384", "0.6530384", "0.6520774", "0.65118974", "0.6503824", "0.64620966", "0.6427614", "0.64271015", "0.6417769", "0.6396897", "0.6351569", "0.6344188", "0.63168454", "0.63034314", "0.62854767", "0.6283417", "0.62797713", "0.6279722", "0.6272628", "0.626377", "0.6255415", "0.6246718", "0.62423754", "0.6228951", "0.6218327", "0.62118715", "0.62002796", "0.61763453", "0.6153377", "0.61399597", "0.613271", "0.61031556", "0.6090692", "0.60744464", "0.6041494", "0.6039239", "0.60335094", "0.60253125", "0.60199755", "0.6014778", "0.5998157", "0.59933126", "0.5988451", "0.5985132", "0.5981576", "0.5980487", "0.59720945", "0.5949943", "0.59272164", "0.59145474", "0.59137845", "0.59106916", "0.5909916", "0.59040624", "0.5903571", "0.58800256", "0.58785504", "0.58782417", "0.58773035", "0.5875145", "0.5871596", "0.5864489", "0.5857905", "0.58430725", "0.58423537", "0.5833399", "0.5830582", "0.5828668", "0.58236337", "0.5823317", "0.5818473", "0.581799", "0.58178204", "0.58163154", "0.5805598", "0.5805457", "0.5801032", "0.5796173", "0.5794922", "0.5792861", "0.5789535", "0.57882583", "0.5786319", "0.57789224", "0.57772034", "0.57748497", "0.5757419", "0.575577" ]
0.58768946
71
Instantiate this submodule with the given UI.
def __init__(self, UI):
        super(COTInjectConfig, self).__init__(UI)
        self._config_file = None
        self._secondary_config_file = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, ui: UI):\n super().__init__(ui)", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError(\"This is an abstract method.\")", "def test_ui_class_instantiates_ui_object(self):\n self.assertEqual(type(self.ui), UI, \"Does not create a ui object\")", "def ui(self):\n return ui", "def __init__(self):\n self.view = GuiView(self)\n return", "def __init__(self, ui):\n QObject.__init__(self, ui)\n self.__ui = ui\n \n self.__translator = None\n self.__loadTranslator()\n \n self.__initMenu()\n \n self.__editors = {}\n self.__mainActions = []", "def __init__(self, animPickerUI, modulesToAdd, parent=None):\n super(ART_AddModuleToCanvas, self).__init__()\n # get the directory path of the tools\n settings = QtCore.QSettings(\"Epic Games\", \"ARTv2\")\n self.toolsPath = settings.value(\"toolsPath\")\n self.iconsPath = settings.value(\"iconPath\")\n self.scriptPath = settings.value(\"scriptPath\")\n self.projectPath = settings.value(\"projectPath\")\n\n self.pickerUI = animPickerUI\n self.modules = []\n self.modulesToAdd = modulesToAdd\n\n # assign close event\n self.closeEvent = self.closeWin\n\n # build the UI\n self.buildUI()", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def _init_ui(self):\r\n\t\t\r\n\t\tself.input_frame = Input(self)\r\n\t\tself.input_frame.pack()\r\n\t\t\r\n\t\tbutton_ok = Button(self, text = \"Ping\", command = self._go)\r\n\t\tbutton_ok.pack()\r\n\t\t\r\n\t\tself.result_frame = Result(self)\r\n\t\tself.result_frame.pack()", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, ui):\n QObject.__init__(self, ui)\n self.__ui = ui\n self.__initialize()\n \n self.__defaults = {\n \"VirtualEnvironmentPy2\": \"\",\n \"VirtualEnvironmentPy3\": \"\",\n \n \"UsePlainPythonPy2\": False,\n \"UsePlainPythonPy3\": False,\n \n \"ServerAddress\": \"\",\n \n \"RecentNumberApps\": 10,\n \"UseIPv6\": False,\n \n \"TranslationsEditor\": \"\",\n \n \"UseExternalBrowser\": False,\n \n \"CheckDeployMode\": False,\n }\n if isWindowsPlatform():\n self.__defaults[\"ConsoleCommandNoClose\"] = \"cmd.exe /k\"\n self.__defaults[\"ConsoleCommand\"] = \"cmd.exe /c\"\n elif isMacPlatform():\n self.__defaults[\"ConsoleCommandNoClose\"] = \"xterm -hold -e\"\n self.__defaults[\"ConsoleCommand\"] = \"xterm -e\"\n else:\n self.__defaults[\"ConsoleCommandNoClose\"] = \"konsole 
--noclose -e\"\n self.__defaults[\"ConsoleCommand\"] = \"konsole -e\"\n \n self.__translator = None\n self.__loadTranslator()", "def setUp(self):\n self.ui = UI()", "def ui(self, ui):\n\n self._ui = ui", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def __init__(self, ui, inp):\n self.ui = ui\n if isinstance(inp, Buffer):\n self.display_name += \"({})\".format(inp.display_name)\n self.widget = HelpBufferView(ui, inp, self.global_keybinds)\n elif isinstance(inp, Command):\n self.display_name += \"({})\".format(inp.name)\n self.widget = HelpCommandView(ui, inp)\n\n super().__init__()", "def __init__(self, scenario_controller):\n super(Ui, self).__init__()\n\n self.scenarioController = scenario_controller\n\n\n # determine if application is a script file or frozen exe\n\n if getattr(sys, 'frozen', False):\n relative_ui_path = 'AssetsV1/MainUI.ui'\n main_ui_path = os.path.dirname(sys.executable)\n\n elif __file__:\n relative_ui_path = 'MainUI.ui'\n main_ui_path = os.path.dirname(__file__)\n\n # needs to point to MainUi.ui file\n complete_ui_path = os.path.join(main_ui_path, relative_ui_path)\n\n uic.loadUi(complete_ui_path, self)\n\n self.timeline = MyTimelineWidget(self, self.scenarioController)\n self.tabWidget.setTabsClosable(True)\n self.tabs = self.tabWidget\n self.tabs.currentChanged.connect(self.current_tab_changed) \n self.tabs.tabCloseRequested.connect(self.close_current_tab)\n\n self.showMaximized()\n\n all_subsystem_names = self.scenarioController.getAvailableSubsystemNames()\n\n self.clearMenuOptions(self.menuOpen)\n self.clearMenuOptions(self.menuFile)\n self.setMenuOptions(self.menuFile, ['Save', 'Save As', 'Save As Scenario'], self.saveMenuHandler)\n self.setMenuOptionsWithParams(self.menuNew, all_subsystem_names, self.newSubsystemHandler)\n self.setMenuOptions(self.menuOpen, ['Open Command File', 'Open Scenario'], self.openMenuHandler)\n\n self.show()\n self.timeline.show()", "def __init__(self, cfg):\n\t\t\n\t\tself.menu = False\n\t\t\n\t\tself.cfg = cfg\n\t\tself.run()", "def __init__(self):\n self.label = \"CDU-PI\"\n self.description = \"da scrivere\"\n self.canRunInBackground = False", "def __init__(self, ui):\n super(WakaTimePlugin, self).__init__(ui)\n self._ui = ui\n\n try:\n e5App().registerPluginObject(pluginTypename, self)\n except KeyError:\n pass # ignore duplicate registration", "def loadUi(uifile, baseinstance=None, workingDirectory=None):\n\n # We parse the UI file and import any required custom widgets\n customWidgets = _get_custom_widgets(uifile)\n\n loader = UiLoader(baseinstance, customWidgets)\n\n if workingDirectory is not None:\n loader.setWorkingDirectory(workingDirectory)\n\n widget = loader.load(uifile)\n QMetaObject.connectSlotsByName(widget)\n return widget", "def __init__(self, output=cli_output, ui=cli_ui, **kwargs):\n self.name = kwargs['name']\n self.greeting = kwargs.get('greeting', '')\n self.entry_frame = kwargs.get('entry', 'entry')\n self.frames = kwargs['frames']\n self.ui = ui\n self.output = output\n\n self.current_frame = self.entry_frame\n self.inventory = []", "def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, handle):\r\n self.context = {}\r\n self.enyo = None\r\n self.showconsole = False\r\n activity.Activity.__init__(self, handle)\r\n\r\n self.max_participants = 1\r\n\r\n self.make_toolbar()\r\n self.make_mainview()", "def __init__(self):\n super(QTUIProject, self).__init__()\n self.setupUi(self)\n self.assignWidgets()\n self.show()\n 
self.SlotsJsonName = \"Slots Assets Folder\"\n self.BingoJsonName = \"Bingo Assets Folder\"", "def gui(self):\n return gui", "def initGui(self):\n\n icon_path = ':/plugins/new_raptor/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Add New Raptor nest'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True", "def __init__(self, gui, path, *args):\n pass", "def init_app(self):\n \n tk_multi_depedencies = self.import_module(\"tk_multi_depedencies\")\n \n # register commands:\n display_name = self.get_setting(\"display_name\")\n \n # \"Publish Render\" ---> publish_render\n command_name = display_name.lower().replace(\" \", \"_\")\n if command_name.endswith(\"...\"):\n command_name = command_name[:-3]\n params = {\"short_name\": command_name, \n \"title\": \"%s...\" % display_name,\n \"description\": \"Publishing dependencies into Shotgun\"}\n\n show_dialog_callback = lambda: self.engine.show_dialog(display_name, self, tk_multi_depedencies.Dialog, self)\n \n self.log_debug(\"Registering command for tk-multi-dependencies\")\n self.engine.register_command(\"%s...\" % display_name, \n show_dialog_callback, \n params)", "def __init__(self, ggui):\n self.gui: gamegui = ggui\n super().__init__(self.gui.top, \"New Game\")", "def init_ui(self):\n\n self.master.title(\"Upload file\")\n self.master.geometry(\"300x200\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_select_file = Button(self, text=\"Select file\", command=self.on_open)\n self.btn_select_file.place(x=80, y=50)\n\n self.selected_file_name = Label(self, text=\"<Selected file name>\")\n self.selected_file_name.place(x=60, y=90)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=80, y=130)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.place(x=10, y=10)", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def __init__(self):\n # Passing the class make this Python 2 and Python 3 compatible\n super(MayaSceneLevelGeneratorUI, self).__init__(parent=maya_main_window())\n\n # Create the generators needed\n self._level_gen = level.LevelGenerator([blocks.BlockFile(\"\", blk_type) for blk_type in VALID_BLOCK_TYPES])\n self._scene_gen = MayaSceneLevelGenerator(None) # Fill in level at button press time\n\n # Window things\n self.setWindowTitle(\"Maya Scene Level Generator\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ PySide2.QtCore.Qt.WindowContextHelpButtonHint)\n\n # Set up for the first time\n self._create_widgets()\n self._create_layout()\n self._refresh_view()\n self._create_connections() # Order matters, since refreshing triggers connections\n\n 
print(self._level_gen.block_list) # TODO delete", "def __init__(self, parent=None):\n\t\tScriptedLoadableModuleWidget.__init__(self, parent)\n\t\tVTKObservationMixin.__init__(self) # needed for parameter node observation\n\t\tself.logic = None\n\t\tself._parameterNode = None\n\t\tself._updatingGUIFromParameterNode = False\n\t\t\n\t\tself.elecModel = None\n\t\tself.elecModelLastButton = None\n\t\tself.elecModelButton = 0\n\t\tself.elecChanLastButton = None\n\t\tself.elecChanButton = 0\n\t\tself.lastPolButton=0\n\t\tself.active = False", "def prepare_UI(self):", "def __init__(self):\n self.label = \"CDU-CS\"\n self.description = \"da scrivere\"\n self.canRunInBackground = False", "def InitUI(self):\n\t\tself._initMenuBar()\n\t\tself._initLayout()\n\t\t\n\t\t# Bindings\n\t\tself.Bind(wx.EVT_BUTTON, self.OnButtonClicked)\n\t\t\n\t\t# We can't even start without an input file\n\t\tself.OnOpen(None)", "def __init__(self, parent=None):\n super(RobotSelection, self).__init__(parent)\n self.parent = parent\n self.initUI()", "def initGui(self):\n\n icon_path = ':/plugins/LidarProcessor/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'LiDAR Operations'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def create(self, parent):\n self.widget = QtCore.QObject(parent)", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, 
QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def __init__(self, interface):\n self.interface = interface\n self.interface.setNewWindowCallback(self.addMenu)\n self.action = QtGui.QAction('E&xport to Treepad...',\n self.interface.mainControl(),\n statusTip='Exports entire file to Treepad text format')\n self.action.triggered.connect(self.exportToTreepad)\n self.addMenu()", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def __init__(self, ui_file, controller):\n super(InputDeviceView, self).__init__()\n self._controller = controller\n self._controller.accepted_cb = self._accepted_cb\n self._controller.finished_cb = self._finished_cb\n self._controller.rejected_cb = self._rejected_cb\n self._controller.current_gait_cb = self._current_gait_cb\n\n self._always_enabled_buttons = []\n\n # Extend the widget with all attributes and children from UI file\n loadUi(ui_file, self)\n\n self.refresh_button.clicked.connect(self._update_possible_gaits)\n\n self._create_buttons()\n self._update_possible_gaits()", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self._updatingGUIFromParameterNode = False", "def create_app(self):\n\n template_folder = T_SYSTEM_PATH + \"/remote_ui/www\"\n static_folder = template_folder + \"/static\"\n\n remote_ui = RemoteUI(args={\"host\": \"localhost\", \"port\": \"5000\", \"debug\": True, \"mode\": \"testing\"}, template_folder=template_folder, static_folder=static_folder)\n\n return remote_ui.app", "def populateUI():\n \n # Main form layout\n form = cmds.formLayout()\n\n # Tab Layout\n tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n # Form attachment config\n cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'right', 0)) )\n\n # The different Tabs on the window\n spawnTab = SpawnObjectsTab()\n roadTab = RoadRiverTab()\n environmentTab = EnvironmentTab()\n\n # Tab creation\n cmds.tabLayout( tabs, edit=True, tabLabel=((spawnTab, 'Spawn Buildings'), (roadTab, 'Create Roads / Rivers'), (environmentTab, \"Create Environment\") ))", "def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()", "def __init__(self):\n\n self.ai_list = []\n self.master = Tk()\n self.ui_draw = IntVar()\n\n 
self.populate_ai_list(self.ai_list)\n self.frame = Frame(self.master)\n self.build_ui()\n\n self.master.mainloop()", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def init_UI(self):\n\n self.master.title(\"Create and upload training file\")\n self.master.geometry('400x400')\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n self.user_input = Entry(self, width=10)\n self.user_input.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.create_uncertain_pairs_file()\n\n self.console_label = ConsoleLabel(self.get_uncertain_pairs_file())\n self.current_record_pair = self.console_label.get_uncertain_pair()\n\n self.btn_next = Button(self, text=\"Next\", bg=\"green\", command=self.get_input)\n self.btn_next.pack()\n\n self.back = Button(self, text=\"Back\", command=self.go_back)\n self.back.pack()", "def initGui(self):\n\n icon_path = ':/plugins/buienradar_plugin/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Buienradar Plugin'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True", "def create_app(self):\n\n template_folder = T_SYSTEM_PATH + \"/remote_ui/www\"\n static_folder = template_folder + \"/static\"\n\n remote_ui = RemoteUI(args={\"host\": \"localhost\", \"port\": \"5000\", \"debug\": True, \"mode\": \"testing\"})\n\n return remote_ui.app", "def create_widget(self):\n self.widget = UILabel()", "def __init__(self):\n self.label = \"CDU-PAT\"\n self.description = \"da scrivere\"\n self.canRunInBackground = False", "def _ui_module(self, name, module):\n raise NotImplementedError()", "def __init__(self, config=None, parent=None, catchall=True, autojson=True):\n\n Bottle.__init__(self, catchall, autojson)\n RonObject.__init__(self, config=config)\n\n if not self.view:\n self.view = View(config = {'module':self})\n\n if isinstance(config, dict):\n self.__namespace = self._get_module_namespace()\n if not self.base_path:\n self.__package = import_module(self.__namespace)\n 
self.base_path = os.path.dirname(self.__package.__file__)\n self.parent = parent\n\n self.load_components()", "def __init__(self: object) -> None:\n super().__init__()\n self.title(\"dnazip\")\n self.configure(bg='#ebebeb')\n self.create_main()\n self.create_menu()\n self.create_buttons()\n self.file = None", "def loadUi(uifile, baseinstance=None, custom_widgets=None):\n return _loadUi(uifile, baseinstance, custom_widgets)", "def __init__(self, type='GUI', args='', wm=None):\n Module.__init__(self)\n\n # set a few instance variables:\n self.runProgress = 0\n\n # Keep track of all plots created:\n self.plots = dict()", "def start_ui(self):\n\t\tself.start_animation()\n\t\tself.app.exec()", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def initGui(self):\n from p4_view import Gui\n self.updateStatus(\"Launching GUI...\")\n self.gui = Gui(self, self.lmap)\n self.gui.setStart(self.cfg[\"START\"])\n self.gui.setGoal(self.cfg[\"GOAL\"])\n self.gui.setPossGoals(self.cfg[\"POSS_GOALS\"])\n #GHD\n self.gui.setMapName(self.cfg[\"MAP_FILE\"])\n self.updateStatus(\"OK\")\n self.gui.mainloop()", "def initGui(self):\n\n icon_path = ':/plugins/Integracion/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u''),\n callback=self.run,\n parent=self.iface.mainWindow())", "def initGui(self):\n\n self.action = QAction(QIcon(self.plugdir + '/icon.png'), 'MGB Water Balance', self.iface.mainWindow())\n self.action.triggered.connect(self.run)\n self.iface.addToolBarIcon(self.action)\n self.iface.addPluginToMenu('&IPH - Plugins', self.action)", "def __init__(self, main_win, parent=None):\n super(Tabs, self).__init__(parent)\n self.main_win = main_win\n\n if self.main_win.beamline is not None:\n try:\n beam = importlib.import_module('beamlines.' + self.main_win.beamline + '.beam_tabs')\n except Exception as e:\n print(e)\n msg_window('cannot import beamlines.' 
+ self.main_win.beamline + ' module')\n raise\n self.prep_tab = beam.PrepTab()\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.display_tab = beam.DispTab()\n self.tabs = [self.prep_tab, self.format_tab, self.rec_tab, self.display_tab]\n else:\n self.format_tab = DataTab()\n self.rec_tab = RecTab()\n self.tabs = [self.format_tab, self.rec_tab]\n\n for tab in self.tabs:\n self.addTab(tab, tab.name)\n tab.init(self, main_win)", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def __init__(self, parent):\n super(DummyStageInterface, self).__init__(parent)\n\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,\n self.tb_size)\n self.tb.AddLabelTool(wx.ID_OPEN, \"Open\", open_bmp)\n\n self.tb.Realize()", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def create_ui(self, parent):\n view = View(\n Item(name=\"text\",\n show_label=False,\n editor=ImageTraitEditor(\n image=ImageResource(self.obj.name,\n search_path=[self.obj.parent.absolute_path]) )),\n id=\"puddle.image_editor.image_editor\",\n kind=\"live\", resizable=True)\n\n ui = self.edit_traits(view=view, parent=parent, kind=\"subpanel\")\n\n return ui", "def __init__(self, inner_widget=None):\n super(GameEngineUI, self).__init__(parent=qtutils.get_maya_window())\n self.setupUi(self)\n\n self.start_btn = StartButton(self)\n self.inner_widget_vlay.addWidget(self.start_btn)\n\n self.inner_widget = inner_widget\n if inner_widget is not None:\n self.inner_widget_vlay.addWidget(inner_widget)\n # end if\n\n self.game_engine = GameEngine()\n\n # Maximize\n desktop = QtGui.QApplication.instance().desktop()\n available_geometry = desktop.screenGeometry(QtGui.QCursor().pos())\n self.setGeometry(available_geometry.x(), 0, available_geometry.width(), available_geometry.height())", "def init_UI(self):\n\n self.master.title(\"Search for different companies\")\n self.master.geometry(\"400x400\")\n\n self.label_combobox = Label(self, text=\"Search by\")\n self.label_combobox.pack()\n\n self.combo_searching_options = Combobox(self, state=\"readonly\")\n self.combo_searching_options['values'] = self.combobox_values\n self.combo_searching_options.pack()\n\n self.label_input = Label(self, text=\"Entry the value\")\n self.label_input.pack()\n\n self.user_input = Entry(self, width=40)\n self.user_input.pack()\n\n self.btn_submit = Button(self, text=\"Submit\", command=self.submit)\n self.btn_submit.pack()\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.pack()", "def build_ui(self):\n self.ui = UI_procstep.Ui_Form()#.Ui_USGSContactInfoWidgetMain()\n self.ui.setupUi(self)\n self.setup_dragdrop(self)\n\n self.proc_step = RepeatingElement(which='tab',\n tab_label='Step', add_text='Additional Step',\n widget=ProcessStep, remove_text='Remove Step', italic_text='Processing Steps Taken')\n\n #self.proc_step = RepeatingElement(params=params, which='tab', tab_label='Source',)\n self.proc_step.add_another()\n self.ui.widget_procstep.layout().addWidget(self.proc_step)", "def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Ciné Club\")\n 
self.setup_ui() # Ajout des Widgets.\n self.populate_movies()\n self.setup_connexions() # Création des connexion entre widgets.", "def loadUI(*args, listTypes: bool=True, uiFile: AnyStr=\"\", uiString: AnyStr=\"\", verbose:\n bool=True, workingDirectory: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def __init__(self, baseinstance, customWidgets=None):\n\n QUiLoader.__init__(self, baseinstance)\n\n self.baseinstance = baseinstance\n\n if customWidgets is None:\n self.customWidgets = {}\n else:\n self.customWidgets = customWidgets", "def __init__(self):\n\n self.plugin_name = 'Yum'", "def ui_setup(self):\n loader = QUiLoader()\n file = QFile('./user_interface/form/main_window.ui')\n file.open(QFile.ReadOnly)\n self._window = loader.load(file)\n file.close()\n\n status_bar = QStatusBar(self._window)\n status_bar.showMessage(__copyright__)\n self._window.setStatusBar(status_bar)\n self._window.setWindowIcon(QIcon('./user_interface/media/bucketing_icon.jpeg'))\n self._window.setWindowTitle('PySide2 Project - Basic UI Framework')\n\n self._option_panel = OptionPanel()\n self._option_panel.add_button('DekBan', './user_interface/media/dekban.png')\n self._option_panel.add_button('Charlie', './user_interface/media/charlie.jpeg')\n self._option_panel.add_button('Simon', './user_interface/media/Simon.jpeg')\n\n # Add widget to main layout\n main_layout = self._window.main_layout\n main_layout.itemAtPosition(0, 0).setAlignment(QtCore.Qt.AlignCenter)\n main_layout.itemAtPosition(0, 1).setAlignment(QtCore.Qt.AlignVCenter)\n main_layout.addWidget(self._option_panel, 2, 0, 1, 1)\n\n # Add page widget to stack\n self._pages['item'] = ItemWidget()\n self._pages['text1'] = TextPage(text=PAUSE_TEXT)\n self._pages['text2'] = TextPage(text=STOP_TEXT)\n\n for index, name in enumerate(self._pages):\n print('pages {} : {} page'.format(index, name))\n self._window.widget_stack.addWidget(self._pages[name].widget)\n\n self._window.widget_stack.setCurrentIndex(0)\n\n # Build up signal / slot\n self._option_panel.currentItemChanged.connect(self.set_page)", "def __init__(self, *args, **kwargs):\r\n if args:\r\n self.addonId = args[0]\r\n else:\r\n posIni = len('plugin://')\r\n posFin = sys.argv[0].find('/', posIni)\r\n addonId = sys.argv[0][posIni:posFin]\r\n self.addonId = kwargs.get('id',None) or addonId", "def create(self):\n self.add_handlers({\"^T\": self.change_forms,\"^Q\": self.exit})\n self.add(npyscreen.TitleFixedText, name='Inventory items:', value='')\n self.inventory_mle = self.add(npyscreen.Pager,\n values=['Checking for plugins in the inventory, please wait...'])", "def __init__(self, master=None):\r\n \r\n tk.Frame.__init__(self, master)\r\n self.master.title('AQUAMI')\r\n module_path, this_filename = os.path.split(__file__)\r\n try:\r\n self.master.iconbitmap(''.join((module_path, '/icon.ico')))\r\n except:\r\n pass\r\n\r\n self.initGUI()", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def __init__(self):\n self.label = \"DBT-2-DXF\"\n self.description = \"da scrivere\"\n self.canRunInBackground = False", "def initGui(self):\n\n icon_path = ':/plugins/Hybriddekning/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Hybriddekning'),\n callback=self.run,\n parent=self.iface.mainWindow())", "def __init__(self, vlc_player, drone_gui):\n QMainWindow.__init__(self)\n self.setWindowTitle(\"VLC Drone Video Player\")\n\n # save the media player\n self.mediaplayer = vlc_player\n\n # need a reference to the main drone vision class\n self.drone_vision = drone_gui\n\n # create the GUI\n 
self.createUI()", "def __init__(self):\n super().__init__()\n # Main UI code goes here\n lineEdit = qtw.QLineEdit(\"A Line edit!\", self)\n label = qtw.QLabel(\"Hello Everybody\", self)\n \n # End main UI code\n self.show()", "def __init__(self, controller):\n global gui\n print(\"Upload folder: \" + UPLOAD_FOLDER)\n gui = self\n self.controller = controller\n try:\n run()\n finally:\n self.controller.quit()", "def __init__(self, parent):\n self.name = \"Base.View\"\n self.parent = parent\n self.Main = parent.Main", "def __init__(self, gui = None):\n\n QtCore.QThread.__init__(self)\n self.gui = gui\n\n return None", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def __init__(self, *args):\n this = _libsbml.new_CompSBasePlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:\n return Submodule.add(self, *args, **kwargs)", "def __init__(self, controller):\n super().__init__(controller)\n\n # The hovered input when entering this View.\n self.first_inp = \"s\"\n\n # Initialize selected variable.\n self.selected = None\n\n # Make background graphics.\n self.make_background_graphics()\n\n # Make Buttons.\n self.make_buttons()\n\n # Make the information box. This explains each Button.\n self.make_info_box()\n\n # Initializes popup.\n self.make_popup()\n\n # Map of input to functions.\n enter = self.graphics.ENTER_KEY\n self.controls = {\n # Pressing \"q\" will go back to the main menu.\n \"q\": lambda: Action(\"goto main menu view\", []),\n\n # Movement keys.\n \"w\": lambda: self.move_cursor(Direction.U),\n \"a\": lambda: self.move_cursor(Direction.L),\n \"s\": lambda: self.move_cursor(Direction.D),\n \"d\": lambda: self.move_cursor(Direction.R),\n\n # Repeat the last valid input.\n enter: self.repeat_last_valid_input,\n\n # Click the selected UIElement.\n \"m\": self.click\n }" ]
[ "0.71629465", "0.6812497", "0.65096986", "0.65096986", "0.6493672", "0.6486345", "0.63511", "0.63335776", "0.62438047", "0.62001234", "0.6143851", "0.6113456", "0.60962325", "0.60550797", "0.6031183", "0.60106504", "0.5976868", "0.5973087", "0.59472775", "0.59101176", "0.58899575", "0.58865273", "0.5866452", "0.58392423", "0.58220094", "0.58057153", "0.57989323", "0.5781516", "0.57678896", "0.5764244", "0.5757002", "0.57568467", "0.5755934", "0.5731385", "0.5728821", "0.57177484", "0.57071984", "0.5663977", "0.5662925", "0.5656067", "0.56504285", "0.5649472", "0.5629112", "0.56236404", "0.5614723", "0.5598098", "0.55949146", "0.55944705", "0.5587896", "0.5587184", "0.55862135", "0.55854064", "0.5582007", "0.5576374", "0.557186", "0.55700314", "0.5567016", "0.55622727", "0.5556154", "0.5553663", "0.55508345", "0.5550594", "0.5544302", "0.5538368", "0.55145866", "0.551424", "0.5511341", "0.55105567", "0.55103016", "0.54992926", "0.54841024", "0.54839027", "0.54819375", "0.54775125", "0.54714006", "0.5469788", "0.54663706", "0.5464437", "0.54580104", "0.54548675", "0.5437491", "0.5433511", "0.54326254", "0.54280186", "0.54247326", "0.5424213", "0.54214734", "0.5419595", "0.5418093", "0.5417688", "0.5411518", "0.5404528", "0.5400381", "0.5396812", "0.53927493", "0.53861195", "0.5385397", "0.5373551", "0.53726286", "0.53697664", "0.53691584" ]
0.0
-1
Do the actual work of this submodule.
def run(self):
        super(COTInjectConfig, self).run()

        vm = self.vm
        platform = vm.platform

        # Find the disk drive where the config should be injected
        # First, look for any previously-injected config disk to overwrite:
        if platform.BOOTSTRAP_DISK_TYPE == 'cdrom':
            (f, d, ci, drive_device) = vm.search_from_filename('config.iso')
        elif platform.BOOTSTRAP_DISK_TYPE == 'harddisk':
            (f, d, ci, drive_device) = vm.search_from_filename('config.vmdk')
        else:
            raise ValueUnsupportedError("bootstrap disk type",
                                        platform.BOOTSTRAP_DISK_TYPE,
                                        "'cdrom' or 'harddisk'")

        if f is not None:
            file_id = vm.get_id_from_file(f)
            self.UI.confirm_or_die(
                "Existing configuration disk '{0}' found.\n"
                "Continue and overwrite it?".format(file_id))
            logger.warning("Overwriting existing config disk '{0}'"
                           .format(file_id))
        else:
            file_id = None
            # Find the empty slot where we should inject the config
            drive_device = vm.find_empty_drive(platform.BOOTSTRAP_DISK_TYPE)
            if drive_device is None:
                raise LookupError("Could not find an empty {0} drive to "
                                  "inject the config into"
                                  .format(platform.BOOTSTRAP_DISK_TYPE))

        (cont_type, drive_address) = vm.find_device_location(drive_device)

        # Copy config file(s) to per-platform name in working directory
        config_files = []
        if self.config_file:
            dest = os.path.join(vm.working_dir, platform.CONFIG_TEXT_FILE)
            shutil.copy(self.config_file, dest)
            config_files.append(dest)
        if self.secondary_config_file:
            dest = os.path.join(vm.working_dir,
                                platform.SECONDARY_CONFIG_TEXT_FILE)
            shutil.copy(self.secondary_config_file, dest)
            config_files.append(dest)

        # Package the config files into a disk image
        if platform.BOOTSTRAP_DISK_TYPE == 'cdrom':
            bootstrap_file = os.path.join(vm.working_dir, 'config.iso')
            create_disk_image(bootstrap_file, contents=config_files)
        elif platform.BOOTSTRAP_DISK_TYPE == 'harddisk':
            bootstrap_file = os.path.join(vm.working_dir, 'config.img')
            create_disk_image(bootstrap_file, file_format='raw',
                              contents=config_files)
        else:
            raise ValueUnsupportedError("bootstrap disk type",
                                        platform.BOOTSTRAP_DISK_TYPE,
                                        "'cdrom' or 'harddisk'")

        # Inject the disk image into the OVA, using "add-disk" functionality
        add_disk_worker(
            UI=self.UI,
            vm=vm,
            DISK_IMAGE=bootstrap_file,
            type=platform.BOOTSTRAP_DISK_TYPE,
            file_id=file_id,
            controller=cont_type,
            address=drive_address,
            subtype=None,
            description='Configuration disk',
            diskname=None,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n obj = UnityFilesystem()\n obj.perform_module_operation()", "def __gitSubmodulesSync(self):\n self.vcs.gitSubmoduleSync(self.project.getProjectPath())", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module = module_manager.get_module_by_name(self._values[\"name\"])\n module_manager.update_module(module)", "def __gitSubmodulesInit(self):\n self.vcs.gitSubmoduleInit(self.project.getProjectPath())", "def do_work(self):", "def __gitSubmoduleAdd(self):\n self.vcs.gitSubmoduleAdd(self.project.getProjectPath())", "def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())", "def run(self):\n\t\t\n\t\tpass", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def RUN(self):", "def do_work(self):\n raise NotImplementedError", "def run(self):\r\n pass", "def __gitSubmodulesUpdateInit(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath(),\n initialize=True)", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module_manager.install_module(self.get_meta())", "def run(self):\n \n pass", "def __gitSubmodulesDeinit(self):\n self.vcs.gitSubmoduleDeinit(self.project.getProjectPath())", "def run(self):\n self.update_repos()", "def run(self): \r\n return", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):\n self.run()", "def execute_module(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError # implement in subclass", "def modules():", "def run(self, root):\r\n pass", "def process(self):\n pass", "def Run():\r\n pass", "def run(self):\n try:\n self._run_internal()\n finally:\n self._cleanup()", "def main(self):\r\n pass", "def _run(self):\n raise NotImplementedError", "def setup( self ):", "def do_manipulations(self, *args, **kwargs):\n pass", "def run(self):\n raise NotImplementedError(\"Subclasses mut override run()\")", "def __run(self):\n sys.settrace(self.globaltrace)\n self.__run_backup()\n self.run = self.__run_backup", "def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()", "def run(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError()", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)", "def do_build(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: build, repository work', level=logging.DEBUG)\n\t\tmodule_id_list = 
self.module_ids()\n\t\tif self.build['deps_only']:\n\t\t\tmodule_id_list_build_only = filter(lambda x: cfg[x]['shutit.core.module.build'], module_id_list)\n\t\tfor module_id in module_id_list:\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tself.log('Considering whether to build: ' + module.module_id, level=logging.INFO)\n\t\t\tif cfg[module.module_id]['shutit.core.module.build']:\n\t\t\t\tif self.build['delivery'] not in module.ok_delivery_methods:\n\t\t\t\t\tself.fail('Module: ' + module.module_id + ' can only be built with one of these --delivery methods: ' + str(module.ok_delivery_methods) + '\\nSee shutit build -h for more info, or try adding: --delivery <method> to your shutit invocation') # pragma: no cover\n\t\t\t\tif self.is_installed(module):\n\t\t\t\t\tself.build['report'] = (self.build['report'] + '\\nBuilt already: ' + module.module_id + ' with run order: ' + str(module.run_order))\n\t\t\t\telse:\n\t\t\t\t\t# We move to the module directory to perform the build, returning immediately afterwards.\n\t\t\t\t\tif self.build['deps_only'] and module_id == module_id_list_build_only[-1]:\n\t\t\t\t\t\t# If this is the last module, and we are only building deps, stop here.\n\t\t\t\t\t\tself.build['report'] = (self.build['report'] + '\\nSkipping: ' + module.module_id + ' with run order: ' + str(module.run_order) + '\\n\\tas this is the final module and we are building dependencies only')\n\t\t\t\t\telse:\n\t\t\t\t\t\trevert_dir = os.getcwd()\n\t\t\t\t\t\tself.get_current_shutit_pexpect_session_environment().module_root_dir = os.path.dirname(self.shutit_file_map[module_id])\n\t\t\t\t\t\tself.chdir(self.get_current_shutit_pexpect_session_environment().module_root_dir)\n\t\t\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\t\t\tself.build_module(module)\n\t\t\t\t\t\tself.logout(echo=False)\n\t\t\t\t\t\tself.chdir(revert_dir)\n\t\t\tif self.is_installed(module):\n\t\t\t\tself.log('Starting module',level=logging.DEBUG)\n\t\t\t\tif not module.start(self):\n\t\t\t\t\tself.fail(module.module_id + ' failed on start', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover", "def run(self):\n # Call to the method of the parent to\n # clean common files and directories\n Clean.run(self)\n\n # Remove C and C++ files if the current working directory\n # is not a source distribution, since the source files\n # are needed by the package in release mode\n cwd = abspath(dirname(__file__))\n remove_c_files = not exists(join(cwd, \"PKG-INFO\"))\n\n if exists(\"build\"):\n shutil.rmtree(\"build\")\n\n for (dirpath, dirnames, filenames) in walk(MODNAME):\n for filename in filenames:\n extension = splitext(filename)[1]\n if filename.endswith((\".so\", \".pyd\", \".dll\", \".pyc\")):\n unlink(join(dirpath, filename))\n elif remove_c_files and extension in {\".c\", \".cpp\"}:\n pyx_file = str.replace(filename, extension, \".pyx\")\n # Remove the C and C++ files only when they are\n # generated from a Cython extension, because in\n # any other case, they really correspond to the\n # source code\n if exists(join(dirpath, pyx_file)):\n unlink(join(dirpath, filename))\n for ddirname in dirnames:\n if ddirname in {\"__pycache__\"}:\n shutil.rmtree(join(dirpath, ddirname))", "def process(self):", "def process(self):", "def process(self):", "def run(self):\n\n # Run bom compare if selected\n if self.root.compare_select.get():\n print('Starting BOM Compare')\n 
self.ccl.set_bom_compare(self.root.bom_compare_old, self.root.bom_compare_new)\n self.ccl.save_compare(self.root.bom_compare_save)\n progressbar.add_current(1)\n print('BOM Compare finished')\n # Run CCL Update\n # Note that ccl update is ran again even if already run once, could be room for improvement\n if self.root.update_select.get():\n print('Starting to update the CCL')\n self.ccl.ccl_docx = self.root.ccl_update_loc\n self.ccl.set_bom_compare(self.root.ccl_update_old, self.root.ccl_update_new)\n self.ccl.update_ccl(self.root.ccl_update_save_loc)\n print('CCL Has been updated and saved')\n progressbar.add_current(1)\n # Collect documents\n if self.root.docs_select.get():\n print('Collecting Documents')\n self.ccl.ccl_docx = self.root.docs_ccl\n self.ccl.path_checks = self.root.docs_paths\n self.ccl.path_ccl_data = self.root.docs_savedir\n self.ccl.username = self.root.docs_user\n self.ccl.password = self.root.docs_pass\n self.ccl.collect_documents(headless=self.root.headless.get())\n print('Documents have been successfully collected')\n # Progressbar progress will be updated in the filehandler module\n # Collect documents\n if self.root.ills_select.get():\n print('Starting to Collect Illustrations')\n self.ccl.ccl_docx = self.root.ill_ccl\n self.ccl.path_ccl_data = self.root.ill_scan\n self.ccl.path_illustration = self.root.ill_save\n self.ccl.collect_illustrations()\n self.ccl.insert_illustration_data(self.root.ill_cclsave)\n print('Illustrations have been collected and CCL has been updated')\n # Progressbar progress will be updated in the CCL module\n # Progress bar final update after all process has finished\n self.progressbar['value'] = progressbar.total\n self.progress_label.config(text='Done')\n print('FINISHED!')", "def _run(self):\n # We usually override this in __init__\n # pylint: disable=method-hidden\n return", "def main():\n obj = PowerMaxVolume()\n obj.perform_module_operation()", "def setup(self):\n\t\tpass", "def __gitSubmodulesSummary(self):\n self.vcs.gitSubmoduleSummary(self.project.getProjectPath())", "def run(self) -> None:\n log.critical('Not implemented')", "def postRun(self):\n pass", "def __gitSubmodulesStatus(self):\n self.vcs.gitSubmoduleStatus(self.project.getProjectPath())", "def RunModule(self):\n module = self.statModules.GetActive()\n \n self.ShutdownStatModule()\n \n if module is not None:\n wx.LogMessage(\"Running stat module...\")\n self.statPage = module.Load(self.nb, self)\n self.SetView(self.statModules.caption, self.statPage)", "def perform(self):\n pass", "def _run(self, *args, **kwargs):\n raise NotImplementedError", "def run_component(self):\n raise NotImplementedError", "def main():\n ModLoader.add_constructor(\"!ec2rlcore.module.Module\", ModLoader.ignoretag)\n\n mod_src_dir = os.path.join(os.getcwd(), \"src\")\n try:\n os.stat(mod_src_dir)\n except Exception:\n os.mkdir(mod_src_dir)\n\n try:\n for mod_file_name in os.listdir(os.path.join(root_ec2rl_dir, \"mod.d\")):\n if mod_file_name == \"ex_remediation.yaml\":\n continue\n with open(os.path.join(root_ec2rl_dir, \"mod.d\", mod_file_name), \"r\") as yamlfile:\n module = yaml.load(yamlfile, Loader=ModLoader)\n if module[\"language\"] == \"python\":\n mod_src_path = os.path.join(mod_src_dir, \"{}.py\".format(module[\"name\"]))\n with open(mod_src_path, \"w\") as pyfile:\n pyfile.write(module[\"content\"])\n print(\"Wrote: {}\".format(mod_src_path))\n print(\"Conversion complete.\")\n except Exception as ex:\n print(ex)\n print(\"Conversion failed. 
Please review the exception to resolve\")", "def do_one_mod(self, names: List[str], infer: bool, exec_: bool, conf: dict):\n\n p = lambda: Progress(\n TextColumn(\"[progress.description]{task.description}\", justify=\"right\"),\n BarColumn(bar_width=None),\n \"[progress.percentage]{task.percentage:>3.1f}%\",\n \"[progress.completed]{task.completed} / {task.total}\",\n TimeElapsedColumn(),\n )\n # step one collect all the modules instances we want to analyse.\n\n modules = []\n for name in names:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # step 2 try to guess the version number from the top module.\n version = getattr(modules[0], \"__version__\", \"???\")\n\n root = names[0].split(\".\")[0]\n module_conf = conf.get(root, {})\n examples_folder = module_conf.get(\"examples_folder\", None)\n print(\"EF\", examples_folder)\n if examples_folder is not None:\n examples_folder = Path(examples_folder).expanduser()\n examples_data = self.collect_examples(examples_folder)\n for edoc, figs in examples_data:\n self.examples.update(\n {k: json.dumps(v.to_json()) for k, v in edoc.items()}\n )\n for name, data in figs:\n print(\"put one fig\", name)\n self.put_raw(name, data)\n print(\"Configuration:\", json.dumps(module_conf, indent=2))\n self.root = root\n self.version = version\n subs = module_conf.get(\"submodules\", [])\n extra_from_conf = [root + \".\" + s for s in subs]\n for name in extra_from_conf:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # print(modules)\n\n collector = DFSCollector(modules[0], modules[1:])\n collected: Dict[str, Any] = collector.items()\n\n # collect all items we want to document.\n for qa, item in collected.items():\n if (nqa := full_qual(item)) != qa:\n print(\"after import qa differs : {qa} -> {nqa}\")\n if collected[nqa] == item:\n print(\"present twice\")\n del collected[nqa]\n else:\n print(\"differs: {item} != {other}\")\n\n for target in module_conf.get(\"exclude\", []):\n print(\"exclude tgt:\", target)\n del collected[target]\n # p = nullcontext\n with p() as p2:\n\n # just nice display of progression.\n taskp = p2.add_task(description=\"parsing\", total=len(collected))\n\n for qa, target_item in collected.items():\n short_description = (qa[:19] + \"..\") if len(qa) > 21 else qa\n p2.update(taskp, description=short_description.ljust(17))\n p2.advance(taskp)\n item_docstring = target_item.__doc__\n\n # TODO: we may not want tosip items as they may have children\n # right now keep modules, but we may want to keep classes if\n # they have documented descendants.\n\n if item_docstring is None and not isinstance(target_item, ModuleType):\n continue\n elif item_docstring is None and isinstance(target_item, ModuleType):\n item_docstring = \"\"\"This module has no documentation\"\"\"\n\n # progress.console.print(qa)\n try:\n if tsparse is None:\n print(\n \"please see how to install Tree-sitter in the readme to parse complex RST documents\"\n )\n arbitrary = tsparse(dedent_but_first(item_docstring).encode())\n except Exception as e:\n print(f\"TS could not parse: {qa}\")\n raise ValueError(f\"from {qa}\") from e\n arbitrary = []\n # raise\n try:\n ndoc = NumpyDocString(dedent_but_first(item_docstring))\n except Exception:\n if not isinstance(target_item, ModuleType):\n p2.console.print(\n \"Unexpected error parsing\",\n target_item,\n target_item.__name__,\n )\n if isinstance(target_item, ModuleType):\n # from .take2 import 
main\n # main(item_docstring)\n ndoc = NumpyDocString(\n f\"Was not able to parse docstring for {qa}\"\n )\n else:\n continue\n if not isinstance(target_item, ModuleType):\n arbitrary = []\n execute_exclude_patterns = module_conf.get(\n \"execute_exclude_patterns\", None\n )\n ex = exec_\n if execute_exclude_patterns and exec_:\n for pat in execute_exclude_patterns:\n if qa.startswith(pat):\n ex = False\n break\n # else:\n # print(\"will run\", qa)\n\n try:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, ex, qa, config=module_conf\n )\n doc_blob.arbitrary = arbitrary\n except Exception:\n raise\n if module_conf.get(\"exec_failure\", None) == \"fallback\":\n print(\"Re-analysing \", qa, \"without execution\")\n # debug:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, False, qa, config=module_conf\n )\n doc_blob.aliases = collector.aliases[qa]\n\n # processing....\n doc_blob.signature = doc_blob.content.pop(\"Signature\")\n try:\n for section in [\"Extended Summary\", \"Summary\", \"Notes\", \"Warnings\"]:\n if section in doc_blob.content:\n if data := doc_blob.content[section]:\n PX = P2(data)\n doc_blob.content[section] = Section(PX)\n else:\n doc_blob.content[section] = Section()\n except Exception as e:\n raise type(e)(f\"during {qa}\")\n\n doc_blob.references = doc_blob.content.pop(\"References\")\n if isinstance(doc_blob.references, str):\n if doc_blob.references == \"\":\n doc_blob.references = None\n else:\n assert False\n doc_blob.references = list(doc_blob.references)\n assert (\n isinstance(doc_blob.references, list) or doc_blob.references is None\n )\n del doc_blob.content[\"Examples\"]\n del doc_blob.content[\"index\"]\n sections_ = [\n \"Parameters\",\n \"Returns\",\n \"Raises\",\n \"Yields\",\n \"Attributes\",\n \"Other Parameters\",\n \"Warns\",\n ##\"Warnings\",\n \"Methods\",\n # \"Summary\",\n \"Receives\",\n ]\n from .take2 import Param\n\n # new_doc_blob._content[\"Parameters\"] = [\n # Parameter(a, b, c)\n # for (a, b, c) in new_doc_blob._content.get(\"Parameters\", [])\n # ]\n\n for s in sections_:\n if s in doc_blob.content:\n assert isinstance(\n doc_blob.content[s], list\n ), f\"{s}, {doc_blob.content[s]} \"\n new_content = Section()\n for param, type_, desc in doc_blob.content[s]:\n assert isinstance(desc, list)\n items = []\n if desc:\n items = P2(desc)\n new_content.append(Param(param, type_, items))\n doc_blob.content[s] = new_content\n\n doc_blob.see_also = []\n if see_also := doc_blob.content.get(\"See Also\", None):\n for nts, d0 in see_also:\n try:\n d = d0\n for (name, type_or_description) in nts:\n if type_or_description and not d:\n desc = type_or_description\n if isinstance(desc, str):\n desc = [desc]\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n type_ = None\n else:\n desc = d0\n type_ = type_or_description\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n\n sai = SeeAlsoItem(Ref(name, None, None), desc, type_)\n doc_blob.see_also.append(sai)\n del desc\n del type_\n except Exception as e:\n raise ValueError(\n f\"Error {qa}: {see_also=} | {nts=} | {d0=}\"\n ) from e\n del doc_blob.content[\"See Also\"]\n\n for k, v in doc_blob.content.items():\n assert isinstance(v, Section), f\"{k} is not a section {v}\"\n # end processing\n\n self.put(qa, json.dumps(doc_blob.to_json(), indent=2))\n for name, data in figs:\n self.put_raw(name, data)\n\n found = {}\n not_found = []\n for k, v in collector.aliases.items():\n if [item for item in v if item != k]:\n if shorter := find_cannonical(k, v):\n found[k] = 
shorter\n else:\n not_found.append((k, v))\n\n if logo := module_conf.get(\"logo\", None):\n self.put_raw(\"logo.png\", Path(logo).read_bytes())\n self.metadata = {\n \"version\": version,\n \"logo\": \"logo.png\",\n \"aliases\": found,\n \"module\": root,\n }", "def run(self) -> None:\n raise NotImplementedError()", "def run(self):\n self._make_lib_file_symbolic_links()\n self._copy_each_include_files_to_include_dir()\n self._make_dep_lib_file_sym_links_and_copy_include_files()\n self.setup_py.add_patchs_to_build_without_pkg_config(\n self.rpm.lib_dir, self.rpm.include_dir\n )\n self.setup_py.apply_and_save()\n self._build_and_install()", "def run():\n from cgl.plugins.blender.tasks.rig import parent_mdl_to_rig\n parent_mdl_to_rig()", "def run(self):\n self.cmdloop()", "def exec_module(self, module):\n pass", "def run_game_logic(self):\n pass", "def pull_submodules():\n subprocess.call([\"git\", \"submodule\", \"init\"])\n subprocess.call([\"git\", \"submodule\", \"update\", \"--recursive\", \"--remote\"])" ]
[ "0.66442853", "0.62968755", "0.6259206", "0.624318", "0.6181432", "0.61801285", "0.6152305", "0.6148519", "0.61186683", "0.61186683", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.6116955", "0.61045635", "0.60868275", "0.6016773", "0.5894232", "0.5880135", "0.5877545", "0.5868885", "0.5857526", "0.5855148", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58546335", "0.58535993", "0.5834693", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.5826229", "0.58254987", "0.57999045", "0.575067", "0.5750185", "0.5740732", "0.5718271", "0.57014865", "0.56973267", "0.5694623", "0.5682289", "0.56673723", "0.5665367", "0.5646514", "0.5642024", "0.5642024", "0.56408477", "0.56408477", "0.56408477", "0.56408477", "0.5632304", "0.56014806", "0.55977154", "0.5591665", "0.5591665", "0.5591665", "0.55913174", "0.55720204", "0.5568205", "0.5564147", "0.5557803", "0.55571955", "0.55530304", "0.55515504", "0.5547814", "0.5538676", "0.5537759", "0.552871", "0.5511285", "0.55110115", "0.5509438", "0.5496187", "0.5492567", "0.548794", "0.54527664", "0.54464006", "0.544595" ]
0.0
-1
Add subparser for the CLI of this submodule.
def create_subparser(self, parent, storage): p = parent.add_parser( 'inject-config', help="Inject a configuration file into an OVF package", usage=self.UI.fill_usage("inject-config", [ "PACKAGE -c CONFIG_FILE [-o OUTPUT]", "PACKAGE -s SECONDARY_CONFIG_FILE [-o OUTPUT]", "PACKAGE -c CONFIG_FILE -s SECONDARY_CONFIG_FILE [-o OUTPUT]", ]), description="""Add one or more "bootstrap" configuration """ """file(s) to the given OVF or OVA.""") p.add_argument('-o', '--output', help="""Name/path of new VM package to create """ """instead of updating the existing package""") p.add_argument('-c', '--config-file', help="""Primary configuration text file to embed""") p.add_argument('-s', '--secondary-config-file', help="""Secondary configuration text file to embed """ """(currently only supported in IOS XRv for """ """admin config)""") p.add_argument('PACKAGE', help="""Package, OVF descriptor or OVA file to edit""") p.set_defaults(instance=self) storage['inject-config'] = p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extend_cli(self, subparser):", "def add(cls, subparsers):\n subparser = subparsers.add_parser(\n name=cls.__tool_name__(),\n description=cls.__get_description__())\n\n cls.__add_arguments__(subparser)\n subparser.set_defaults(func=cls.from_args)\n return subparser", "def refine_cli(\n subparsers: SubParsersAction,\n parent_parsers: List[argparse.ArgumentParser],\n) -> None:", "def add_subcommand(\n subp: _SubParsersAction, name: str, description: Optional[str] = None, **kwargs\n ) -> \"CommandParser\":\n desc_help = {\"description\": description, \"help\": description}\n return subp.add_parser(name, **desc_help, **kwargs)", "def add_argparse_subparser(subparsers):\n\n new_sub_parser = subparsers.add_parser(\n PluginManager.argparse_subparser_name(), help=\"plugin commands\"\n )\n PluginManager.__argparse_subparser = new_sub_parser\n plugin_subparsers = new_sub_parser.add_subparsers(\n dest=PluginManager.__root_subparser_name\n )\n\n sub_sub_parser = plugin_subparsers.add_parser(\n \"list\", help=\"list the available plugins\"\n )\n sub_sub_parser.add_argument(\n \"--all\",\n dest=\"show_all\",\n action=\"store_true\",\n default=False,\n help=\"show all loaded plugins (default is False)\",\n )\n sub_sub_parser.add_argument(\n dest=\"list_filter\",\n default=None,\n help=\"filter\",\n nargs=\"?\",\n type=PluginManager.__list_filter_type,\n )\n sub_sub_parser = plugin_subparsers.add_parser(\n \"info\", help=\"information on a specific plugin\"\n )\n sub_sub_parser.add_argument(\n dest=\"info_filter\",\n default=None,\n type=PluginManager.__info_filter_type,\n help=\"an id\",\n )", "def extend_cli(self, root_subparsers):\n\n user_dict = {}\n if self.add_base_groups:\n user_dict = dict(shared_groups=SHARED_GROUPS)\n\n self.specification = SpecParser.from_plugin(\n subparser=root_subparsers,\n plugin=self.plugin,\n base_groups=user_dict)", "def _add_to_cli(self, parser, group=None):\n dest = self.dest\n if group is not None:\n dest = group.name + '_' + dest\n\n subparsers = parser.add_subparsers(dest=dest,\n title=self.title,\n description=self.description,\n help=self.help)\n # NOTE(jd) Set explicitly to True for Python 3\n # See http://bugs.python.org/issue9253 for context\n subparsers.required = True\n\n if self.handler is not None:\n self.handler(subparsers)", "def addSubParser( parentParser, cmdName ) :\n parser = parentParser.add_parser( cmdName, help='Generate a new UBOS package scaffold.' 
)\n parser.add_argument( '--directory', required=True, help='Directory where to create the package scaffold')\n parser.add_argument( '--template', required=True, help='Name of the template to use' )\n parser.add_argument( '--json', required=False, help='Settings file' )", "def add_subparser(sp, name, **kwargs):\n kwargs[\"add_help\"] = False\n kwargs['formatter_class'] = ap.ArgumentDefaultsHelpFormatter\n sparser = sp.add_parser(name, **kwargs)\n\n sparser.add_argument(\"-h\", \"--help\", action=custom_help(),\n help=\"print the short or long help\")\n\n return sparser", "def add_cli(self, subparser):\n new_parser = subparser.add_parser('create', help='create new scratch file')\n new_parser.add_argument('name', nargs='?', default=None, help=\"Optional Name to be given to the file, \"\n \"default name is an increment of 'scratch##'\")\n new_parser.set_defaults(func=self.action)\n return subparser", "def setup_subparser(name, description, commands):\n subparser = SUBPARSER.add_parser(\n name,\n help=description\n )\n subparser.add_argument(\n 'sub_command',\n metavar='sub_command',\n type=str,\n nargs='+',\n help='Which command to run. Options: %s' % ', '.join(commands),\n choices=commands\n )\n\n return subparser", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--pythonpath',\n help='A directory to add to the Python path',\n )\n self.add_arguments(parser)\n return parser", "def add_subparser(subparsers):\n parser = subparsers.add_parser('run', help=\"run artifact\")\n parser.add_argument('run_config', default='cps.ini',\n help=\"run configuration file\")\n parser.add_argument('-p', '--persist', action=\"store_true\", default=False,\n help=\"to persist data, dont delete sandbox after use\")\n\n parser.set_defaults(func=main)", "def create_parser(self, prog_name, subcommand):\r\n self.prog_name = \"{} {}\".format(prog_name, subcommand)\r\n return super(TrackedCommand, self).create_parser(prog_name, subcommand)", "def customize_subparser(cls, subparser: argparse.ArgumentParser) -> None:\n pass", "def subparser( parser, subparsers ):", "def SubParser(self):\n if not self._sub_parser:\n # pylint: disable=protected-access\n self._sub_parser = self._parser.add_subparsers(\n action=parser_extensions.CloudSDKSubParsersAction,\n calliope_command=self)\n return self._sub_parser", "def add_subcommands(cls, parser: argparse.ArgumentParser) -> None:\n if cls.SUBCOMMANDS:\n subparsers = parser.add_subparsers(title=\"subcommands\", metavar=\"\", dest='cmd')\n for subcmd_class in cls.SUBCOMMANDS:\n parsers = subcmd_class.get_args()\n subcmd_class.parser = parsers[-1]\n\n subparser = subparsers.add_parser(\n subcmd_class.NAMES[0],\n aliases=subcmd_class.NAMES[1:],\n parents=parsers,\n help=subcmd_class.HELP,\n epilog=subcmd_class.EPILOG)\n subparser.set_defaults(command_class=subcmd_class)\n subcmd_class.customize_subparser(subparser)", "def create_parser(self, prog_name, subcommand):\r\n # Hack __main__ so --help in dev_appserver_main works OK.\r\n sys.modules['__main__'] = dev_appserver_main\r\n return super(Command, self).create_parser(prog_name, subcommand)", "def get_parser(subparsers, parent=None):\n parser = subparsers.add_parser(\n \"flow\",\n description=\"Invoke ML on MCU flow\",\n parents=[parent] if parent else [],\n add_help=(parent is None),\n )\n parser.set_defaults(func=handle)\n add_common_options(parser)\n 
add_context_options(parser)\n add_flow_options(parser)\n subparsers = parser.add_subparsers(dest=\"subcommand2\") # this line changed\n load.get_parser(subparsers)\n tune.get_parser(subparsers)\n build.get_parser(subparsers)\n compile_.get_parser(subparsers)\n run.get_parser(subparsers)", "def add_subparser(subparsers):\n parser = subparsers.add_parser(\"utils/update\",\n description=\"Update apt and the groot_ansible ecosystem\", # this shows in the help for this command\n help=\"update your ansible/apt environment\", # this shows in the parent parser\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n common.add_ansible_arguments(parser)\n parser.set_defaults(func=parse_args)", "def _add_to_cli(self, parser, group=None):\n container = self._get_argparse_container(parser, group)\n kwargs = self._get_argparse_kwargs(group)\n prefix = self._get_argparse_prefix('', group.name if group else None)\n deprecated_names = []\n for opt in self.deprecated_opts:\n deprecated_name = self._get_deprecated_cli_name(opt.name,\n opt.group)\n if deprecated_name is not None:\n deprecated_names.append(deprecated_name)\n self._add_to_argparse(parser, container, self.name, self.short,\n kwargs, prefix,\n self.positional, deprecated_names)", "def fill_subparsers(subparsers):\n\n\t\tfor cls in BaseCommand.__subclasses__():\n\t\t\tcls.add_subparser(subparsers)", "def init_parser(subparsers):\n parser = subparsers.add_parser(COMMAND, help=\"Add a new task to the task list\")\n parser.add_argument(\"title\", type=str, help=\"The title of the new task\")\n parser.add_argument(\"description\", type=str, help=\"The description of the new task\")\n doto.cli.cmd.task.init_task_flags(parser)", "def create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\", dest=\"subparser_name\")\n subparsers.add_parser(\"generate-settings\", help=\"Generate settings.json to install \"\n \"Gluu Cloud Native Edition non-interactively\")\n subparsers.add_parser(\"install\", help=\"Install Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3.\")\n subparsers.add_parser(\"install-no-wait\", help=\"Install Gluu Cloud Native Edition using Kustomize. \"\n \"Depreciated > 4.3. \"\n \"There will be no wait time between installing services. \"\n \"Pods may look like they are restarting but they will \"\n \"be waiting for hierarchy \"\n \"pods to be running\")\n subparsers.add_parser(\"install-ldap-backup\", help=\"Install ldap backup cronjob only.\")\n subparsers.add_parser(\"restore\", help=\"Install Gluu Cloud Native Edition with a \"\n \"running database and previous configuration using Kustomize.\"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"uninstall\", help=\"Uninstall Gluu that was installed using Kustomize. \"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade\", help=\"Upgrade Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade-values-yaml\", help=\"Upgrade Gluu Cloud Native Edition\")\n subparsers.add_parser(\"install-couchbase\", help=\"Install Couchbase only. Used with installation of Gluu with Helm\")\n subparsers.add_parser(\"install-couchbase-backup\", help=\"Install Couchbase backup only.\")\n subparsers.add_parser(\"uninstall-couchbase\", help=\"Uninstall Couchbase only.\")\n subparsers.add_parser(\"helm-install\", help=\"Install Gluu Cloud Native Edition using helm. 
\"\n \"This also installs the nginx-ingress chart\")\n subparsers.add_parser(\"helm-uninstall\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This also uninstalls the nginx-ingress chart\")\n\n subparsers.add_parser(\"helm-install-gluu\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This assumes nginx-ingress is installed\")\n subparsers.add_parser(\"helm-uninstall-gluu\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This only uninstalls Gluu\")\n subparsers.add_parser(\"version\", help=\"Outputs version of pygluu installer.\")\n return parser", "def add_args_to_subparser(the_parser, subcommand_name):\n\n the_parser.add_argument(CmdArgs.verbose_optional, help=CmdArgs.verbose_help,\n action='store_true',\n )\n\n if subcommand_name in DCA_VISUALIZATION_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.pdb_chain_id, help=CmdArgs.pdb_chain_id_help)\n the_parser.add_argument(CmdArgs.pdb_file, help=CmdArgs.pdb_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.dca_file, help=CmdArgs.dca_file_help)\n the_parser.add_argument(CmdArgs.rna_secstruct_file_optional,\n help=CmdArgs.rna_secstruct_file_help,\n )\n the_parser.add_argument(CmdArgs.linear_dist_optional,\n help=CmdArgs.linear_dist_help, type = int,\n )\n the_parser.add_argument(CmdArgs.contact_dist_optional,\n help=CmdArgs.contact_dist_help, type = float,\n )\n the_parser.add_argument(CmdArgs.num_dca_contacts_optional,\n help = CmdArgs.num_dca_contacts_help, type = int,\n )\n the_parser.add_argument(CmdArgs.wc_neighbor_dist_optional, type= int,\n help = CmdArgs.wc_neighbor_dist_help,\n )\n the_parser.add_argument(CmdArgs.pdb_id_optional, help = CmdArgs.pdb_id_help)\n\n if subcommand_name in FILE_CONTENT_SUBCOMMANDS:\n if subcommand_name == 'pdb_content':\n the_parser.add_argument(CmdArgs.pdb_file, help = CmdArgs.pdb_file_help)\n if subcommand_name in MSA_TRIMMING_SUBCOMMANDS:\n the_parser.add_argument(CmdArgs.max_gap_optional,\n type = float, help = CmdArgs.max_gap_help,\n )\n if subcommand_name == 'trim_by_refseq':\n the_parser.add_argument(CmdArgs.biomolecule, help=CmdArgs.biomolecule_help)\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n the_parser.add_argument(CmdArgs.refseq_file, help=CmdArgs.refseq_file_help)\n the_parser.add_argument(CmdArgs.remove_all_gaps_optional,\n help= CmdArgs.remove_all_gaps_help, action='store_true',\n )\n if subcommand_name == 'trim_by_gap_size':\n the_parser.add_argument(CmdArgs.msa_file, help=CmdArgs.msa_file_help)\n return None", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def configure_cli(subparsers) -> None: # type: ignore\n parser = subparsers.add_parser(\n name='items',\n description='Retrieve new/updated items from Aleph and send to CaiaSoft'\n )\n parser.set_defaults(cmd_name='items')", "def add_subparsers(cls, parser, name=\"\", prefixes=[], delim=\"_\", title=\"commands\", description=\"available commands\", required=True):\n\t\tcommand = f\"command_{name}\"\n\t\tif command in cls.COMMANDS:\n\t\t\traise CommandParserNameDuplicated(f\"Command parser with name {name} already 
registered.\")\n\t\t\n\t\tcls.COMMANDS[command] = {}\n\t\t\n\t\tsub = parser.add_subparsers(title=title, dest=command, description=description)\n\t\tsub.required = True\n\t\tfor pf in prefixes:\n\t\t\tfor c, method in cls.get_commands(prefix=pf, delim=delim):\n\t\t\t\tcls.set_subparser_for(c, method, sub)\n\t\t\t\tcls.COMMANDS[command][c] = method", "def register(\n self,\n root_parser: argparse.ArgumentParser,\n subparsers: argparse._SubParsersAction,\n ) -> None:\n self.root_parser = root_parser\n parser = subparsers.add_parser(\n self.name,\n aliases=self.aliases,\n help=self.help,\n description=self.help,\n add_help=self.add_help,\n )\n parser.set_defaults(command=self)\n self.configure(parser)", "def add_arg_parser(subparsers):\n # add\n add_p = subparsers.add_parser('add', description='Create a bundle from a .csv, .tsv, or a directory of files.')\n add_p.add_argument('-t', '--tag', nargs=1, type=str, action='append',\n help=\"Set one or more tags: 'dsdt add -t authoritative:True -t version:0.7.1'\")\n add_p.add_argument('bundle', type=str, help='The destination bundle in the current context')\n add_p.add_argument('path_name', type=str, help='File or directory of files to add to the bundle', action='store')\n add_p.set_defaults(func=lambda args: _add(args))", "def add_subparser(\n subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]\n) -> None:\n run_parser = subparsers.add_parser(\n \"run\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Starts a Rasa server with your trained model.\",\n )\n run_parser.set_defaults(func=run)\n\n run_subparsers = run_parser.add_subparsers()\n sdk_subparser = run_subparsers.add_parser(\n \"actions\",\n parents=parents,\n conflict_handler=\"resolve\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n help=\"Runs the action server.\",\n )\n sdk_subparser.set_defaults(func=run_actions)\n\n arguments.set_run_arguments(run_parser)\n arguments.set_run_action_arguments(sdk_subparser)", "def create_parser(self, prog_name, subcommand):\r\n return OptionParser(prog=prog_name,\r\n usage=self.usage(subcommand),\r\n version='',\r\n add_help_option = False,\r\n option_list=self.option_list)", "def add_subcommand(self, command):\n\n if self.subcommand_parser is None:\n self.subcommand_parser = self.parser.add_subparsers(\n dest='command', help='Please select one command mode below',\n title='Command modes'\n )\n self.subcommands = {}\n\n if not isinstance(command, ScriptCommand):\n raise ScriptError('Subcommand must be a ScriptCommand instance')\n\n parser = self.subcommand_parser.add_parser(\n command.name,\n help=command.short_description,\n description=command.description,\n epilog=command.epilog,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n self.subcommands[command.name] = command\n command.script = self\n\n if callable(getattr(command, '__register_arguments__', None)):\n command.__register_arguments__(parser)\n\n return parser", "def init_parser(self):\n usage = \"%s [options] [subcommand]\" % (self.command)\n description = \"An SBTools test plug-in.\"\n\n parser = SBToolOptionParser(self, self.sbtools, usage, description=description)\n return parser", "def make_adder(self, *args, **kwargs):\n kwargs.setdefault(\"dest\", \"subcmd\")\n subp = self.add_subparsers(*args, **kwargs)\n return partial(self.add_subcommand, subp)", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def 
add_parser(subparsers):\n parser = subparsers.add_parser('upload', help='upload build to Koji')\n\n parser.add_argument('--scm-url', required=True,\n help='SCM URL for this build, eg. git://...')\n parser.add_argument('--owner', required=True,\n help='koji user name that owns this build')\n parser.add_argument('--tag',\n help='tag this build, eg. ceph-3.2-xenial-candidate')\n parser.add_argument('--dryrun', action='store_true',\n help=\"Show what would happen, but don't do it\")\n parser.add_argument('--skip-log', action='store_true',\n help=\"Do not upload a .build log file\")\n parser.add_argument('directory', help=\"parent directory of a .dsc file\")\n parser.set_defaults(func=main)", "def add_parser(subparsers) -> None:\n contest_parser = subparsers.add_parser(\n 'contest', help='build contest files')\n mut_ex_group = contest_parser.add_mutually_exclusive_group()\n mut_ex_group.add_argument(\n '-p', '--pdf', action='store_true', default=False, help='generate contest PDFs')\n mut_ex_group.add_argument('-i', '--io', action='store_true',\n default=False, help='generate contest input/output files')\n contest_parser.add_argument(\n 'problem_dir', help='path to problem(s)', nargs='+')\n contest_parser.add_argument(\n 'contest_dir', help='directory which the contest will be saved')\n contest_parser.set_defaults(function=lambda options: process_contest(\n options.problem_dir, options.contest_dir, options.pdf, options.io))", "def build_cli(self):\n parser = argparse.ArgumentParser(\"xsgen\",\n conflict_handler='resolve', argument_default=NotSpecified)\n for plugin in self.plugins:\n plugin.update_argparser(parser)\n self.parser = parser\n return parser", "def add_subcommand(self, cmd):\n self.subcommands.append(cmd)", "def _init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\",)\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def 
_init_cfg_subcmd(subparsers):\n cfg_related = subparsers.add_parser(\"cfg\")\n cfg_subparsers = cfg_related.add_subparsers(dest=\"cfg_subcommand\")\n\n cfg_write_parser = cfg_subparsers.add_parser(\"write\")\n cfg_write_parser.add_argument(\n \"--level\",\n choices=[\"user\", \"cwd\"],\n default=None,\n help=\"Specify if this config is for user or just the working directory.\",\n )\n cfg_write_parser.add_argument(\n \"--open\", action=\"store_const\", const=True, default=False\n )\n cfg_subparsers.add_parser(\"show\")\n\n cfg_export_parser = cfg_subparsers.add_parser(\"export\")\n cfg_export_parser.add_argument(\"--dir\", default=os.getcwd())\n\n return cfg_related", "def add_new_subparser(subparsers, formatter_class=RawTextHelpFormatter):\n # TODO: add info on no args to description or help\n # Adds custom --help argument\n generic_parent_parser = cmd.argparse.get_generic_parent_parser()\n new_description = 'Create a new test module or page object'\n new_help = new_description\n new_parser = subparsers.add_parser(\n 'new', description=new_description, help=new_help,\n parents=[generic_parent_parser],\n formatter_class=formatter_class,\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New <type> subparsers\n new_type_desc = 'Run \\'{} <type> --help\\' for details'.format(new_parser.prog)\n new_subparsers = new_parser.add_subparsers(\n title='File Types', description=new_type_desc, dest='type', metavar='<type>'\n )\n # New test parser\n new_test_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<TestCaseClass>',\n class_name_help='Name to use for the initial test case class'\n )\n new_test_description = 'Create a new test module'\n new_test_help = new_test_description\n new_subparsers.add_parser(\n 'test', description=new_test_description, help=new_test_help,\n parents=[new_test_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # New page object parser\n new_page_parent_parser = get_new_parent_parser(\n parents=[generic_parent_parser], class_name_metavar='<PageObjectClass>',\n class_name_help='Name to use for the initial page object class'\n )\n new_page_description = 'Create a new page object module'\n new_page_help = new_page_description\n new_page_parser = new_subparsers.add_parser(\n 'page', description=new_page_description, help=new_page_help,\n parents=[new_page_parent_parser],\n add_help=False, epilog=cmd.argparse.ARGPARSE_EPILOG\n )\n # TODO: add optional --prototype arg with a list of valid page object prototype classes\n return new_parser", "def init_cli(self):\n self.cli_parser.add_cmd(\"completions\", \"Return a list of completions for autocomplete\",\n self.cli_completions)\n\n self.cli_parser.add_cmd(\"disable-plugin\", \"Disable the named plugin\",\n self.cli_disable_plugin,\n \"plugin_name\", \"Name of plugin to disable\")\n\n self.cli_parser.add_cmd(\"enable-plugin\", \"Enables the named plugin\",\n self.cli_enable_plugin,\n \"plugin_name\", \"Name of the plugin to enable\")\n\n self.cli_parser.add_cmd(\"help\", \"Displays this message\",\n self.cli_parser.get_help)\n\n self.cli_parser.add_cmd(\"kill-plugin\", \"Kills the named plugin\",\n self.cli_kill_plugin,\n \"plugin_name\", \"Name of the plugin to kill\")\n\n self.cli_parser.add_cmd(\"load-config\", \"Reloads a configuration from file\",\n self.cli_load_config,\n \"config_path\", \"Path of the configuration file\")\n\n self.cli_parser.add_cmd(\"load-plugin\", \"Loads (or reloads) a plugin from disk\",\n self.cli_load_plugin,\n 
\"plugin_name\", \"Name of the plugin to load\")\n\n self.cli_parser.add_cmd(\"list-plugins\", \"List all loaded plugins\",\n self.cli_list_plugins)\n\n self.cli_parser.add_cmd(\"start-plugin\", \"Start the named plugin\",\n self.cli_start_plugin,\n \"plugin_name\", \"Name of the plugin to start\")\n\n self.cli_parser.add_cmd(\"stop-plugin\", \"Stop the named plugin\",\n self.cli_stop_plugin,\n \"plugin_name\", \"Name of the plugin to stop\")\n\n self.cli_parser.add_cmd(\"stop-all-plugins\", \"Stop all plugins\",\n self.cli_stop_all_plugins)\n\n self.cli_parser.add_cmd(\"restart-plugin\", \"Restarts the named plugin\",\n self.cli_restart_plugin,\n \"plugin_name\", \"Name of the plugin to restart\")\n\n self.cli_parser.add_cmd(\"unload-plugin\", \"Unloads the named plugin\",\n self.cli_unload_plugin,\n \"plugin_name\", \"Name of the plugin to unload\")", "def set_subparser_for(cls, command, method, subparser):\n\n\t\tdef add_pos_argument(sub, label, arg):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\traise CommandTypeError(\"bool type not supported as positional argument\")\n\t\t\tif \"value\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"value\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"values\"][0], choices=arg[\"values\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, nargs='+', default=arg[\"values\"][0], choices=arg[\"values\"], help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tsub.add_argument(label, type=arg[\"type\"], help=arg[\"help_line\"])\n\n\t\tdef add_opt_argument(sub, label, arg, add_alias=True):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\n\t\t\telif arg[\"type\"] in [str, int, float] and \"value\" in arg:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\telif arg[\"type\"] == list and \"values\" not in arg:\n\t\t\t\tsub.add_argument(label, nargs=\"*\", help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"*\", help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(label, type=arg[\"type\"], choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"?\", help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\n\t\tfunc = getattr(cls, method)\n\n\t\targs_info = cls.__parse_docstring(func.__doc__)\n\t\tif args_info == {}:\n\t\t\treturn\n\n\t\tc = subparser.add_parser(command, help=args_info[\"help_line\"])\n\n\t\tif \"arguments\" in args_info:\n\t\t\tfor label, arg in args_info[\"arguments\"].items():\n\t\t\t\tif arg[\"pos\"]:\n\t\t\t\t\tadd_pos_argument(c, 
label, arg)\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=True)\n\t\t\t\t\texcept ArgumentError as e:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=False)", "def calcs_parser(subparser, name, help_msg):\n parser = subparser.add_parser(name,\\\n help=help_msg)\n parser = add_arg(parser, 'parallel')\n parser = add_arg(parser, 'xvars')\n parser = add_arg(parser, 'time')\n parser = add_arg(parser, 'modif_time')\n parser = add_arg(parser, 'modif_coord')\n parser = add_arg(parser, 'modif_var')\n parser = add_arg(parser, 'eswitch')\n parser = add_arg(parser, 'fswitch')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def setup_subcommands(argparser):\n\n subparsers = argparser.add_subparsers()\n\n parser_info = subparsers.add_parser('info', help = 'Provide the information about the user')\n parser_info.add_argument('user', help = 'The user to inspect')\n\n parser_ownerships = subparsers.add_parser('ownerships', help = 'Show items which this user owns')\n parser_ownerships.add_argument('user', help = 'The name of the user to show information about')\n parser_ownerships.add_argument('-r', '--recursive', action = 'store_true', help = 'Show items which this user own through being in lists')\n \n parser_info.set_defaults(handler = show_info)\n parser_ownerships.set_defaults(handler = show_ownerships)", "def create_parser(self, prog_name, subcommand):\n return OptionParser(prog=prog_name,\n usage=self.usage(subcommand),\n version=self.get_version(),\n option_list=self.option_list)", "def test_create_subparser_noop(self):\n self.command.create_subparser()", "def create_parser():\n parser = argparse.ArgumentParser(\n description='CLI for SMS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add subcommands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # Downlink Unitdata\n downlink_unitdata_parser = subparsers.add_parser(\n 'DU', help=\"Send downlink unitdata to SMSOrc8rGW service\",\n )\n downlink_unitdata_parser.add_argument('imsi', help='e.g. 001010000090122 (no prefix required)')\n downlink_unitdata_parser.add_argument('data', help='Data as a hex string e.g. 
1fc13a00')\n downlink_unitdata_parser.set_defaults(func=send_downlink_unitdata)\n\n return parser", "def add_cmd(self, name: str, help_str: str, cmd_fn: typing.Callable, arg: str = None, arg_help: str = None):\n self.cmd_names.append(name)\n cmd = self.cli_subparsers.add_parser(name, help=help_str)\n cmd.set_defaults(func=cmd_fn)\n if arg is not None:\n cmd.add_argument(arg, help=arg_help)", "def load_into(subparser, as_cmd=None):\n p = subparser\n p.description = description\n\n if not as_cmd:\n as_cmd = default_name\n out = cli.CommandSuite(as_cmd, p)\n out.load_subcommand(topics)\n return out", "def add_arguments(self, sub_parser):\n sp = sub_parser", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def build_subcommands_parser(parser, module):\n mdefs = module.__dict__\n keys = list(mdefs.keys())\n keys.sort()\n subparsers = parser.add_subparsers(help='sub-command help')\n for command in keys:\n if command.startswith('pub_'):\n func = module.__dict__[command]\n parser = subparsers.add_parser(command[4:], help=func.__doc__)\n parser.set_defaults(func=func)\n argspec = inspect.signature(func)\n positionals = []\n short_opts = set([])\n for arg in argspec.parameters.values():\n if arg.default == inspect.Parameter.empty:\n positionals += [arg]\n else:\n param_name = arg.name.replace('_', '-')\n short_opt = param_name[0]\n if not (param_name.startswith('no') or\n (short_opt in short_opts)):\n opts = ['-%s' % short_opt, '--%s' % param_name]\n else:\n opts = ['--%s' % param_name]\n short_opts |= set([short_opt])\n if isinstance(arg.default, list):\n parser.add_argument(*opts, action='append')\n elif isinstance(arg.default, dict):\n parser.add_argument(*opts, type=json.loads)\n elif arg.default is False:\n parser.add_argument(*opts, action='store_true')\n elif arg.default is not None:\n parser.add_argument(*opts, default=arg.default)\n else:\n parser.add_argument(*opts)\n if positionals:\n for arg in positionals[:-1]:\n parser.add_argument(arg.name)\n parser.add_argument(positionals[-1].name, nargs='*')", "def add_complete_hic_subparser(subparsers):\n parser = subparsers.add_parser(\"hic-complete\", help=\"HiFive HiC Project Complete Analysis Function: Create all necessary files (Fend, Data, and Project) and learn correction parameters for a HiFive HiC project.\", epilog=\"For command line options of each normalization approach, type: %(prog)s <ALGORITHM> -h\")\n subparser2 = parser.add_subparsers(dest='algorithm')\n prob_parser = subparser2.add_parser(\"probability\", help=\"A probability model based approach for learning correction values associated with each fend. Learning is accomplished using gradient descent.\")\n exp_parser = subparser2.add_parser(\"express\", help=\"An appoximation based approach for learning correction values associated with each fend. Learning is accomplished using a variation of matrix balancing.\")\n bin_parser = subparser2.add_parser(\"binning\", help=\"A multivariate binning probability model-based approach for learning correction values associated with fend characteristics. 
Learning is accomplished using the Broyden-Fletcher-Goldfarb-Shanno algorithm.\")\n binprob_parser = subparser2.add_parser(\"binning-probability\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a probability model.\")\n binexp_parser = subparser2.add_parser(\"binning-express\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a matrix-balancing approximation.\")\n add_complete_hic_group(prob_parser)\n add_hic_probability_group(prob_parser)\n add_complete_hic_group(exp_parser)\n add_hic_express_group(exp_parser)\n add_complete_hic_group(bin_parser)\n add_hic_binning_group(bin_parser)\n add_complete_hic_group(binprob_parser)\n add_hic_probability_group(binprob_parser)\n add_hic_binning_group(binprob_parser)\n add_complete_hic_group(binexp_parser)\n add_hic_express_group(binexp_parser)\n add_hic_binning_group(binexp_parser)\n return", "def add_hicnormalize_subparser(subparsers):\n parser = subparsers.add_parser(\"hic-normalize\", help=\"HiFive HiC Project Normalization Function: Learn correction parameters for a HiFive HiC project.\", epilog=\"For command line options of each normalization approach, type: %(prog)s <ALGORITHM> -h\")\n subparser2 = parser.add_subparsers(dest='algorithm')\n prob_parser = subparser2.add_parser(\"probability\", help=\"A probability model based approach for learning correction values associated with each fend. Learning is accomplished using gradient descent.\")\n exp_parser = subparser2.add_parser(\"express\", help=\"An appoximation based approach for learning correction values associated with each fend. Learning is accomplished using a variation of matrix balancing.\")\n bin_parser = subparser2.add_parser(\"binning\", help=\"A multivariate binning probability model-based approach for learning correction values associated with fend characteristics. Learning is accomplished using the Broyden-Fletcher-Goldfarb-Shanno algorithm.\")\n binprob_parser = subparser2.add_parser(\"binning-probability\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a probability model.\")\n binexp_parser = subparser2.add_parser(\"binning-express\", help=\"A chained-correction approach first learning fend characteristic corrections and applying them prior to learning fend-associated correction values via a matrix-balancing approximation.\")\n add_hic_normalize_group(prob_parser)\n add_hic_probability_group(prob_parser)\n add_hic_normalize_group(exp_parser)\n add_hic_express_group(exp_parser)\n exp_parser.add_argument(\"-f\", \"--min-interactions\", dest=\"minint\", required=False, type=int, default=20,\n action='store', help=\"The minimum number of interactions for fend filtering, if refiltering is required due to distance cutoff or selected reads. 
[default: %(default)s]\")\n add_hic_normalize_group(bin_parser)\n add_hic_binning_group(bin_parser)\n add_hic_normalize_group(binprob_parser)\n add_hic_probability_group(binprob_parser)\n add_hic_binning_group(binprob_parser)\n add_hic_normalize_group(binexp_parser)\n add_hic_express_group(binexp_parser)\n binexp_parser.add_argument(\"-f\", \"--min-interactions\", dest=\"minint\", required=False, type=int, default=20,\n action='store', help=\"The minimum number of interactions for fend filtering, if refiltering is required due to distance cutoff or selected reads. [default: %(default)s]\")\n add_hic_binning_group(binexp_parser)\n return", "def register_argument_parser(add_parser, action):\n sub_command = str(action)\n return add_parser(sub_command,\n help=f'{sub_command} token',\n description=f'{sub_command.capitalize()} a Waiter token. '\n 'In addition to the optional arguments '\n 'explicitly listed below, '\n 'you can optionally provide any Waiter '\n 'token parameter as a flag. For example, '\n 'to specify 10 seconds for the '\n 'grace-period-secs parameter, '\n 'you can pass --grace-period-secs 10. '\n 'You can also provide nested fields separated by a period. For example, '\n 'to specify an environment variable FOO as \\\"bar\\\", you can pass --env.FOO \\\"bar\\\".')", "def add_parser(subp, raw):\n tmpp = subp.add_parser('mkmodel', help='create a model file',\n formatter_class=raw,\n description=textwrap.dedent(DESC))\n tmpp.add_argument('model', type=str, metavar='MODELNAME',\n help='name of the model')\n tmpp.add_argument('type', type=str, choices=['ssa', 'sde'],\n help='model type')\n tmpp.add_argument('--nspecs', type=positive_type,\n help='number of species')\n tmpp.add_argument('--nreacs', type=positive_type,\n help='number of reactions')\n tmpp.add_argument('--dim', type=positive_type,\n help='dimension of phase space')\n tmpp.add_argument('-z', type=str, choices=['row', 'col'],\n help='state change matrix format')", "def set_parser(*, collected, parser=None):\n if parser is None:\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n commands = unique(collected)\n for name, details in commands.items():\n original = details.original\n args = details.extra\n a_subparser = subparsers.add_parser(name)\n a_subparser.set_defaults(\n __gather_name__=name,\n __gather_command__=original,\n )\n for arg_details in args:\n a_subparser.add_argument(*arg_details.args, **dict(arg_details.kwargs))\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(prog=__file__.replace(\".py\", \"\"),\n description='simple $PATH tool')\n parser.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n parser.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n subs = parser.add_subparsers(title='subcommands',\n description='The subcommands')\n\n sub = subs.add_parser('replace', description=\"Search & Replace $PATH\")\n sub.set_defaults(cmd='path_replace')\n sub.add_argument('terms', nargs='+',\n help='Format: search:replace, search:replace, ...')\n\n sub = subs.add_parser('show', description=\"Show $PATH compoents\")\n sub.set_defaults(cmd='path_show')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-w', '--nowarn', action=\"store_true\",\n help='Turn off path warnings.')\n\n sub = subs.add_parser('which', description=\"Platform agnostic `which -a`\")\n 
sub.set_defaults(cmd='path_which')\n sub.add_argument('look', help='Look for this executable')\n sub.add_argument('-n', '--nocolor', dest=\"color\",\n action=\"store_false\", default=True,\n help='Turn off ANSI color codes.')\n sub.add_argument('-v', '--version', action=\"store_true\",\n help='Show version of exact matches.')\n\n return parser", "def argparse_subparser_name():\n return \"plugins\"", "def cli_add_command(parent_parser):\n parser = parent_parser.add_parser(\"setup\", description=cli_add_command.__doc__)\n\n def run_it(_):\n from .util import print_important_env\n print_important_env()\n\n log.info(\"AKRR Setup\")\n\n _config_setup()\n setup()\n\n parser.set_defaults(func=run_it)", "def register_subcommand(nest_ops_subparsers):\n parser = nest_ops_subparsers.add_parser('compile', \\\n help=COMPILE_CMD_HELP, \\\n description=COMPILE_CMD_DESCRIPTION, \\\n formatter_class=argparse.RawTextHelpFormatter )\n\n parser.add_argument('--code_type', \\\n choices=VALID_CODE_TYPES, \\\n help=CODE_TYPE_ARG_HELP, \\\n default='all' \\\n )\n\n parser.add_argument('--project', \\\n help=\"\"\"Which project to build. Only affects the web_assets:dist\n code_type, where it determines which project's index.html\n will be the main entry point index.html in the static files.\"\"\", \\\n choices=nest_envs.VALID_PROJECT_NAMES, \\\n default=nest_envs.DEFAULT_PROJECT_NAME, \\\n )\n \n parser.add_argument('--runlevel', \\\n help='Determines the run level for logging, error checking, etc.',\n choices=nest_envs.VALID_RUNLEVEL_NAMES,\n default=nest_envs.DEFAULT_RUNLEVEL_NAME, \\\n )\n\n #super ugly callback mechanism from argparse\n parser.set_defaults(func=_run_compile_cmd)\n return", "def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n return add_argparse_args(cls, parent_parser, **kwargs)", "def get_command_line_parser():\n command_line_parser = argparse.ArgumentParser(\n description=\"Execute data workflows defined in flo.yaml files\",\n )\n subcommand_creator = command_line_parser.add_subparsers(\n title='SUBCOMMANDS',\n )\n for command_module in COMMAND_MODULES:\n command = command_module.Command(subcommand_creator)\n\n # this sets a default value for the command \"option\" so\n # that, when this Command is selected by argparse from the\n # command line, we know which comman instance it\n # corresponds with. 
See run_subcommand function below.\n command.option_parser.set_defaults(command=command)\n return command_line_parser", "def import_additional_parser():\n # Parse arguments\n try:\n global add_parser\n import add_parser\n except ImportError as e:\n print('No additional parser found.')\n pass", "def arg_parser(self):\n print\n\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n status = subparsers.add_parser('status', help=self.viewer.status.__doc__)\n status.set_defaults(func=self.viewer.status)\n\n view_catalog = subparsers.add_parser('catalog', help=self.viewer.view_full_catalog.__doc__)\n view_catalog.set_defaults(func=self.viewer.view_full_catalog)\n\n view_tags = subparsers.add_parser('tags', help=self.viewer.view_all_tags.__doc__)\n view_tags.add_argument('-a', '--alpha', action='store_true', help=\"List tags alphabetically\")\n view_tags.set_defaults(func=self.viewer.view_all_tags)\n\n search = subparsers.add_parser('search', help=self.viewer.search_tags.__doc__)\n search.add_argument('-t', '--tags', nargs='+', required=True, help=\"Tags to search for\")\n search.add_argument('-o', action='store_true', help=\"Search using 'or' logic ('and' logic by default)\")\n search.set_defaults(func=self.viewer.search_tags) \n\n add_file = subparsers.add_parser('addfile', help=self.editor.add_files.__doc__)\n add_file.add_argument('filename', help=\"Name of file to add catalog\")\n add_file.set_defaults(func=self.editor.add_files)\n\n add_tags = subparsers.add_parser('addtags', help=self.editor.add_tag.__doc__)\n add_tags.add_argument('filename', help=\"Name of file to add catalog\")\n add_tags.add_argument('-t', '--tags', nargs='+', help=\"Tags to add to catalog\")\n add_tags.set_defaults(func=self.editor.add_tag)\n\n edit_entry = subparsers.add_parser('edit', help=self.editor.edit_entry.__doc__)\n edit_entry.add_argument('filename', help=\"Name of file to add catalog\")\n edit_entry.set_defaults(func=self.editor.edit_entry)\n\n clean_catalog = subparsers.add_parser('clean', help=self.editor.clean_catalog.__doc__)\n clean_catalog.add_argument('-t', '--tags', nargs='+', help=\"Tags to be deleted from catalog entirely\")\n clean_catalog.set_defaults(func=self.editor.clean_catalog)\n\n delete_entry = subparsers.add_parser('delete', help=self.editor.delete_entry.__doc__)\n delete_entry.add_argument('filename', help=\"File from which to delete specified tags\")\n delete_entry.add_argument('-t', '--tags', nargs='+', help=\"Tags to be deleted\")\n delete_entry.set_defaults(func=self.editor.delete_entry)\n\n merge_tags = subparsers.add_parser('merge', help=self.editor.merge_tags.__doc__)\n merge_tags.add_argument('--source', required=True, help=\"File from which tags are being taken\")\n merge_tags.add_argument('--dest', required=True, help=\"Destination file to which tags from --source are added\")\n merge_tags.set_defaults(func=self.editor.merge_tags)\n\n \n args = parser.parse_args()\n\n args.func(args)", "def register_command(subparser):\n update_parser = subparser.add_parser('update', help='Run the log files through an updater. 
Used to update '\n 'between versions of autology')\n update_parser.set_defaults(func=_main)\n\n # Arguments\n update_parser.add_argument('-f', '--files', help='Update the files that are currently defined in the log '\n 'directories', action='store_true')\n update_parser.add_argument('-t', '--templates', help='Install a new output template', action='store_true')\n update_parser.add_argument('-T', '--template-definition', help='Define a template definition to install',\n default=template_utilities.DEFAULT_TEMPLATES_URL)", "def add_commands(subparsers, path):\n log.debug('importing %s', path)\n try:\n del sys.modules[path]\n except KeyError:\n pass\n try:\n package = importlib.import_module(path)\n except Exception as e:\n log.warning('failed to import commands package %s',\n path, exc_info=True)\n return\n log.debug('commands package: %s', path)\n for (finder, name, ispkg) in pkgutil.iter_modules(package.__path__):\n if ispkg:\n continue\n try:\n command = importlib.import_module('.' + name, path)\n except Exception as e:\n log.warning('failed to import %s command: %s',\n path, name, exc_info=True)\n continue\n if not getattr(command, 'run', None):\n log.warning('skipping command module without run function: %s',\n name)\n continue\n log.debug('command: %s'%(name))\n name = command.__name__.split('.')[-1]\n parser_help = getattr(command, 'parser_help', None)\n if parser_help is None:\n log.warning('command %s missing help text'%(command.__name__))\n parser = subparsers.add_parser(name, help=parser_help)\n command.add_arguments(parser)\n parser.set_defaults(run=command.run)", "def create_parser():\n helpdict = create_parser.helpdict\n # Customized usage, for more verbosity concerning these subparsers options.\n usage = \"\"\"%(prog)s [-h] [--version] {run,info} ... 
\"\"\"\n usage += tw.dedent(\"\"\"\\n\n From more help on each of the subcommands, type:\n %(prog)s run -h\n %(prog)s info -h\\n\\n\"\"\")\n\n # parser = ap.ArgumentParser(\n #parser = MpArgumentParser(\n #formatter_class=ap.ArgumentDefaultsHelpFormatter,\n #description='Monte Python, a Monte Carlo code in Python',\n #usage=usage)\n parser = initialise_parser(\n description='Monte Python, a Monte Carlo code in Python', usage=usage)\n\n # -- add the subparsers\n subparser = parser.add_subparsers(dest='subparser_name')\n\n ###############\n # run the MCMC\n runparser = add_subparser(subparser, 'run', help=\"run the MCMC chains\")\n\n # -- number of steps (OPTIONAL)\n runparser.add_argument('-N', help=helpdict['N'], type=positive_int,\n dest='N')\n # -- output folder (OBLIGATORY)\n runparser.add_argument('-o', '--output', help=helpdict['o'], type=str,\n dest='folder')\n # -- parameter file (OBLIGATORY)\n runparser.add_argument('-p', '--param', help=helpdict['p'],\n type=existing_file, dest='param')\n # -- covariance matrix (OPTIONAL)\n runparser.add_argument('-c', '--covmat', help=helpdict['c'],\n type=existing_file, dest='cov')\n # -- jumping method (OPTIONAL)\n runparser.add_argument('-j', '--jumping', help=helpdict['j'],\n dest='jumping', default='fast',\n choices=['global', 'sequential', 'fast'])\n # -- sampling method (OPTIONAL)\n runparser.add_argument('-m', '--method', help=helpdict['m'],\n dest='method', default='MH',\n choices=['MH', 'NS', 'PC', 'CH', 'IS', 'Der', 'Fisher'])\n # -- update Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--update', help=helpdict['update'], type=int,\n dest='update', default=50)\n # -- update Metropolis Hastings with an adaptive jumping factor (OPTIONAL)\n runparser.add_argument('--superupdate', help=helpdict['superupdate'], type=int,\n dest='superupdate', default=0)\n # -- superupdate acceptance rate argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar', help=helpdict['superupdate-ar'], type=float,\n dest='superupdate_ar', default=0.26)\n # -- superupdate acceptance rate tolerance argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar-tol', help=helpdict['superupdate-ar-tol'], type=float,\n dest='superupdate_ar_tol', default=0.01)\n # -- adaptive jumping factor Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--adaptive', help=helpdict['adaptive'], type=int,\n dest='adaptive', default=0)\n # -- adaptive ts argument (OPTIONAL)\n runparser.add_argument('--adaptive-ts', help=helpdict['adaptive-ts'], type=int,\n dest='adaptive_ts', default=1000)\n\n # -- jumping factor (OPTIONAL)\n runparser.add_argument('-f', help=helpdict['f'], type=float,\n dest='jumping_factor', default=2.4)\n # -- temperature (OPTIONAL)\n runparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- minimize (OPTIONAL)\n runparser.add_argument('--minimize', help=helpdict['minimize'],\n action='store_true')\n # -- minimize argument, minimization tolerance (OPTIONAL)\n runparser.add_argument('--minimize-tol', help=helpdict['minimize-tol'], type=float,\n dest='minimize_tol', default=0.00001)\n # -- fisher (OPTIONAL)\n runparser.add_argument('--fisher', help=helpdict['fisher'],\n action='store_true')\n # -- fisher argument (OPTIONAL)\n runparser.add_argument('--fisher-asymmetric', help=helpdict['fisher-asymmetric'],\n dest='fisher_asymmetric',action='store_true')\n # -- fisher step iteration (OPTIONAL)\n runparser.add_argument('--fisher-step-it', help=helpdict['fisher-step-it'],\n dest='fisher_step_it', 
default=10)\n # -- fisher step iteration argument, -deltaloglkl target (OPTIONAL)\n runparser.add_argument('--fisher-delta', help=helpdict['fisher-delta'], type=float,\n dest='fisher_delta', default=0.1)\n # -- fisher step iteration argument, -deltaloglkl tolerance (OPTIONAL)\n runparser.add_argument('--fisher-tol', help=helpdict['fisher-tol'], type=float,\n dest='fisher_tol', default=0.05)\n # -- fisher symmetric likelihood assumption threshold (OPTIONAL)\n runparser.add_argument('--fisher-sym-lkl', help=helpdict['fisher-sym-lkl'], type=float,\n dest='fisher_sym_lkl', default=0.1)\n # -- configuration file (OPTIONAL)\n runparser.add_argument('--conf', help=helpdict['conf'],\n type=str, dest='config_file',\n default='default.conf')\n # -- arbitrary numbering of an output chain (OPTIONAL)\n runparser.add_argument('--chain-number', help=helpdict['chain-number'])\n # -- stop run after first successful update using --update (EXPERIMENTAL)\n runparser.add_argument('--stop-after-update', help=helpdict['stop-after-update'],\n dest='stop_after_update', action='store_true')\n # display option\n runparser.add_argument('--display-each-chi2', help=helpdict['display-each-chi2'],\n dest='display_each_chi2', action='store_true')\n # -- parallel chains without MPI (OPTIONAL)\n runparser.add_argument('--parallel-chains', help=helpdict['parallel-chains'],\n action='store_true')\n\n ###############\n # MCMC restart from chain or best fit file\n runparser.add_argument('-r', '--restart', help=helpdict['r'],\n type=existing_file, dest='restart')\n runparser.add_argument('-b', '--bestfit', dest='bf', help=helpdict['b'],\n type=existing_file)\n\n ###############\n # Silence the output (no print on the console)\n runparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n ###############\n # Adding new derived parameters to a run\n runparser.add_argument(\n '--Der-target-folder', dest=\"Der_target_folder\",\n help=helpdict['Der-target-folder'], type=str, default='')\n runparser.add_argument(\n '--Der-param-list', dest='derived_parameters',\n help=helpdict['Der-param-list'], type=str, default='', nargs='+')\n\n ###############\n # Importance Sampling Arguments\n runparser.add_argument(\n '--IS-starting-folder', dest='IS_starting_folder',\n help=helpdict['IS-starting-folder'], type=str, default='', nargs='+')\n\n ###############\n # We need the following so the run does not crash if one of the external\n # samplers is not correctly installed despite not being used\n from contextlib import contextmanager\n import sys, os\n\n @contextmanager\n def suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n ###############\n # MultiNest arguments (all OPTIONAL and ignored if not \"-m=NS\")\n # The default values of -1 mean to take the PyMultiNest default values\n try:\n with suppress_stdout():\n from MultiNest import NS_prefix, NS_user_arguments\n NSparser = runparser.add_argument_group(\n title=\"MultiNest\",\n description=\"Run the MCMC chains using MultiNest\"\n )\n for arg in NS_user_arguments:\n NSparser.add_argument('--'+NS_prefix+arg,\n default=-1,\n **NS_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyMultiNest detected but MultiNest likely not installed correctly. 
'\n 'You can safely ignore this if not running with option -m NS')\n\n ###############\n # PolyChord arguments (all OPTIONAL and ignored if not \"-m=PC\")\n # The default values of -1 mean to take the PyPolyChord default values\n try:\n with suppress_stdout():\n from PolyChord import PC_prefix, PC_user_arguments\n PCparser = runparser.add_argument_group(\n title=\"PolyChord\",\n description=\"Run the MCMC chains using PolyChord\"\n )\n for arg in PC_user_arguments:\n PCparser.add_argument('--'+PC_prefix+arg,\n default=-1,\n **PC_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyPolyChord detected but PolyChord likely not installed correctly. '\n 'You can safely ignore this if not running with option -m PC')\n\n ###############\n # CosmoHammer arguments (all OPTIONAL and ignored if not \"-m=CH\")\n # The default values of -1 mean to take the CosmoHammer default values\n try:\n with suppress_stdout():\n from cosmo_hammer import CH_prefix, CH_user_arguments\n CHparser = runparser.add_argument_group(\n title=\"CosmoHammer\",\n description=\"Run the MCMC chains using the CosmoHammer framework\")\n for arg in CH_user_arguments:\n CHparser.add_argument('--'+CH_prefix+arg,\n default=-1,\n **CH_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('CosmoHammer detected but emcee likely not installed correctly. '\n 'You can safely ignore this if not running with option -m CH')\n\n ###############\n # Information\n infoparser = add_subparser(subparser, 'info',\n help=\"analyze the MCMC chains\")\n\n # -- folder to analyze\n infoparser.add_argument('files', help=helpdict['files'],\n nargs='+')\n # Silence the output (no print on the console)\n infoparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n # -- to only write the covmat and bestfit, without computing the posterior\n infoparser.add_argument('--minimal', help=helpdict['minimal'],\n action='store_true')\n # -- number of bins (defaulting to 20)\n infoparser.add_argument('--bins', help=helpdict['bins'],\n type=int, default=20)\n # -- temperature (OPTIONAL)\n infoparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- deprecated: remove the mean-likelihood line\n infoparser.add_argument('--no-mean', help=helpdict['no-mean'],\n dest='mean_likelihood_old', action='store_false')\n # -- plot the mean-likelihood line\n infoparser.add_argument('--plot-mean', help=helpdict['plot-mean'],\n dest='mean_likelihood', action='store_true')\n # -- to remove the mean and 68% limits on top of each 1D plot\n infoparser.add_argument('--short-title-1d', help=helpdict['short-title-1d'],\n dest='short_title_1d', action='store_true')\n # -- possible plot file describing custom commands\n infoparser.add_argument('--extra', help=helpdict['extra'],\n dest='optional_plot_file', default='')\n # -- if you just want the covariance matrix, use this option\n infoparser.add_argument('--noplot', help=helpdict['noplot'],\n dest='plot', action='store_false')\n # -- if you just want to output 1d posterior distributions (faster)\n infoparser.add_argument('--noplot-2d', help=helpdict['noplot-2d'],\n dest='plot_2d', action='store_false')\n # -- if you just want to output triangle with 2d contours\n infoparser.add_argument('--noplot-2d-diag', help=helpdict['noplot-2d-diag'],\n dest='plot_diag', action='store_false')\n # -- when plotting 2d posterior distribution, use contours and not contours\n # 
filled (might be useful when comparing several folders)\n infoparser.add_argument('--contours-only', help=helpdict['contours-only'],\n dest='contours_only', action='store_true')\n # -- if you want to output every single subplots\n infoparser.add_argument('--all', help=helpdict['all'], dest='subplot',\n action='store_true')\n # -- to change the extension used to output files (pdf is the default one,\n # but takes long, valid options are png and eps)\n infoparser.add_argument('--ext', help=helpdict['ext'],\n type=str, dest='extension', default='pdf')\n # -- to set manually the number of plots per hoorizontal raw in 1d plot\n infoparser.add_argument('--num-columns-1d', help=helpdict['num-columns-1d'],\n type=int, dest='num_columns_1d')\n # -- also analyze the non-markovian part of the chains\n infoparser.add_argument('--keep-non-markovian', help=helpdict['keep-non-markovian'],\n dest='markovian', action='store_false')\n # -- force only analyzing the markovian part of the chains\n infoparser.add_argument('--keep-only-markovian', help=helpdict['keep-only-markovian'],\n dest='only_markovian', action='store_true')\n # -- fraction of chains to be analyzed after burn-in removal (defaulting to 1.0)\n infoparser.add_argument('--keep-fraction', help=helpdict['keep-fraction'],\n type=float, dest='keep_fraction', default=1.0)\n # -- calculate the covariant matrix when analyzing the chains\n infoparser.add_argument('--want-covmat', help=helpdict['want-covmat'],\n dest='want_covmat', action='store_true')\n # -------------------------------------\n # Further customization\n # -- fontsize of plots (defaulting to 16)\n infoparser.add_argument('--fontsize', help=helpdict['fontsize'],\n type=int, default=16)\n # -- ticksize of plots (defaulting to 14)\n infoparser.add_argument('--ticksize', help=helpdict['ticksize'],\n type=int, default=14)\n # -- linewidth of 1d plots (defaulting to 4, 2 being a bare minimum for\n # legible graphs\n infoparser.add_argument('--line-width', help=helpdict['line-width'],\n type=int, default=4)\n # -- number of decimal places that appear on the tick legend. If you want\n # to increase the number of ticks, you should reduce this number\n infoparser.add_argument('--decimal', help=helpdict['decimal'], type=int,\n default=3)\n # -- number of ticks that appear on the graph.\n infoparser.add_argument('--ticknumber', help=helpdict['ticknumber'],\n type=int, default=3)\n # -- legend type, to choose between top (previous style) to sides (new\n # style). 
It modifies the place where the name of the variable appear.\n infoparser.add_argument('--legend-style', help=helpdict['legend-style'],\n type=str, choices=['sides', 'top'],\n default='sides')\n # -- width of gaussian smoothing for plotting posteriors,\n # in units of bin size, increase for smoother data.\n infoparser.add_argument('--gaussian-smoothing', help=helpdict['gaussian-smoothing'],\n type=float, default=0.5)\n # interpolation factor for plotting posteriors, 1 means no interpolation,\n # increase for smoother curves (it means that extra bins are created\n # and interpolated between computed bins)\n infoparser.add_argument('--interpolation-smoothing', help=helpdict['interpolation-smoothing'],\n type=int, default=4)\n # -- plot Fisher ellipses\n infoparser.add_argument('--plot-fisher', help=helpdict['plot-fisher'],\n dest='plot_fisher',action='store_true')\n infoparser.add_argument('--center-fisher', help=helpdict['center-fisher'],\n dest='center_fisher',action='store_true')\n\n infoparser.add_argument('--posterior-smoothing', help=helpdict['posterior-smoothing'],\n type=int, default=5)\n\n return parser", "def setup_args() -> argparse.ArgumentParser:\n main_parser = argparse.ArgumentParser(prog=\"gh\")\n subparsers = main_parser.add_subparsers(dest=\"subparser\")\n command_parser = subparsers.add_parser(\"commands\", help=\"Runs a command\")\n command_parser.add_argument(\n \"choice\",\n help=\"The chosen command to run\",\n choices=gh.commands.OPTIONS.keys(),\n )\n analytics_parser = subparsers.add_parser(\"analytics\", help=\"Runs an analysis\")\n analytics_parser.add_argument(\n \"choice\",\n help=\"The chosen analysis to run\",\n choices=gh.analytics.OPTIONS.keys(),\n )\n return main_parser", "def vultr_cli(parser):\n return parser", "def parser(subparsers, repo):\n desc = 'show status of the repo'\n status_parser = subparsers.add_parser(\n 'status', help=desc, description=desc.capitalize(), aliases=['st'])\n status_parser.add_argument(\n 'paths', nargs='*', help='the specific path(s) to status',\n action=helpers.PathProcessor, repo=repo)\n status_parser.set_defaults(func=main)", "def add_hic_interval_subparser(subparsers):\n parser = subparsers.add_parser(\"hic-interval\",\n help=\"HiFive Binning Function: Create a tabular interaction file containing data from a HiFive HiC project. Data are a genomic-interval format (chr1 start1 stop1 chr2 start2 stop2).\")\n parser.add_argument(\"-c\", \"--chromosome\", dest=\"chrom\", default=None, required=True, type=str,\n help=\"The chromosome from which to pull interaction data from.\")\n parser.add_argument(\"--chromosome2\", dest=\"chrom2\", default=None, required=False, type=str,\n help=\"The second chromosome from which to pull interaction data from if pulling trans data.\")\n parser.add_argument(\"-s\", \"--start\", dest=\"start\", default=None, required=False, type=int,\n help=\"The start coordinate of the pulled region to return. (None indicates the first valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"-e\", \"--stop\", dest=\"stop\", default=None, required=False, type=int,\n help=\"The stop coordinate of the pulled region to return. (None indicates the last valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"--start2\", dest=\"start2\", default=None, required=False, type=int,\n help=\"The start coordinate of the second chromosome pulled region to return. 
(None indicates the first valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"--stop2\", dest=\"stop2\", default=None, required=False, type=int,\n help=\"The stop coordinate of the second chromosome pulled region to return. (None indicates the last valid bin on the chromosome) [default %(default)s]\")\n parser.add_argument(\"-b\", \"--binsize\", dest=\"binsize\", default=10000, type=int,\n help=\"The size of bins, in base pairs, to group data into. [default: %(default)s]\")\n parser.add_argument(\"-m\", \"--max-distance\", dest=\"maxdist\", default=None, type=int,\n help=\"The maximum interaction distance to return (None indicates no maximum). [default: %(default)s]\")\n parser.add_argument(\"-d\", \"--data-type\", dest=\"datatype\", default=\"fend\",\n help=\"Which corrections (if any) to apply to counts. [default: %(default)s]\",\n choices=[\"raw\", \"fend\", \"distance\", \"enrichment\", \"expected\"])\n parser.add_argument(\"-M\", \"--matrix\", dest=\"matrix\", default=False, action=\"store_true\",\n help=\"Store output as a tab-separated matrix of values.\")\n parser.add_argument(\"-y\", \"--dynamically-bin\", dest=\"dynamic\", default=False, action=\"store_true\",\n help=\"Dynamically bin heatmap.\")\n parser.add_argument(\"-x\", \"--expansion-binsize\", dest=\"expbinsize\", default=10000, type=int,\n help=\"The size of bins, in base pairs, to group data into for expanding under-populated bins. [default: %(default)s]\")\n parser.add_argument(\"-f\", \"--minobservations\", dest=\"minobs\", default=20, type=int,\n help=\"The minimum number of observed reads in a bin for it to be considered valid. [default: %(default)s]\")\n parser.add_argument(\"-a\", \"--search-distance\", dest=\"search\", default=0, type=int,\n help=\"The furthest distance from the bin minpoint to expand bounds. If set to zero, there is no limit on expansion distance. [default: %(default)s]\")\n parser.add_argument(\"-v\", \"--remove-failed\", dest=\"remove\", default=False, action=\"store_true\",\n help=\"If a non-zero 'search-distance' is given, it is possible for a bin not to meet the 'minobservations' criteria before stopping looking. If this occurs and 'remove-failed' is set, the observed and expected values for that bin are zero.\")\n parser.add_argument(\"-i\", \"--image-file\", dest=\"image\", default=None, type=str,\n help=\"Save the data as an image to this file.\")\n parser.add_argument(\"-p\", \"--pdf\", dest=\"pdf\", default=False, action=\"store_true\",\n help=\"Format the image in PDF format. [default: %(default)s]\")\n parser.add_argument(\"-r\", \"--rotate\", dest=\"rotate\", default=False, action=\"store_true\",\n help=\"Rotate the plot 45 degrees (cis binned only). [default: %(default)s]\")\n parser.add_argument(\"-t\", \"--ticks\", dest=\"ticks\", default=False, action=\"store_true\",\n help=\"Add tick marks and labels to the plot (pdf format and binned only). [default: %(default)s]\")\n parser.add_argument(\"-l\", \"--legend\", dest=\"legend\", default=False, action=\"store_true\",\n help=\"Add color scale to the plot (pdf format only). 
[default: %(default)s]\")\n parser.add_argument(\"-k\", \"--keyword\", dest=\"keywords\", default=[], type=str, action='append',\n help=\"Additional keyword arguments to pass to plotting function.\")\n add_silent_argument(parser)\n parser.add_argument(dest=\"project\", type=str,\n help=\"The name of a HiFive HiC project file to pull data from.\")\n parser.add_argument(dest=\"output\", type=str,\n help=\"The name of the file to write HiC interval to.\")\n return", "def get_parser():\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n prog='pv2')\n subparsers = parser.add_subparsers(dest='cmd')\n # subparsers.add_parser('selfcheck',\n # add_help=False,\n # help=\"Self-check of the sst toolkit.\")\n # parser.add_argument('--version',\n # action='version',\n # version=('sst %s' % str(sst.__version__)))\n subparsers.add_parser('eval',\n add_help=False,\n parents=[evaluate.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Evaluate a single image\"))\n subparsers.add_parser('train',\n add_help=False,\n parents=[train.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Train a new model.\"))\n subparsers.add_parser('plot',\n add_help=False,\n parents=[plot.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Plot summary information.\"))\n return parser", "def add_list_parser(subparsers):\n list_parser = subparsers.add_parser(\"list\")\n list_parser.set_defaults(func=list_cli.main)\n list_parser.add_argument('--scenarios', '-s',\n dest=\"scenarios\",\n action='store_true',\n help='List Scenarios')", "def create_parser(general_defaults={}, constants={}, subcommand=MAIN):\n\n defaults = general_defaults['BigMLer']\n\n version = pkg_resources.require(\"BigMLer\")[0].version\n version_text = \"\"\"\\\nBigMLer %s - A Higher Level API to BigML's API\nCopyright 2012-2015 BigML\n\nLicensed under the Apache License, Version 2.0 (the \\\"License\\\"); you may\nnot use this file except in compliance with the License. You may obtain\na copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\nLicense for the specific language governing permissions and limitations\nunder the License.\"\"\" % version\n constants['version_text'] = version_text\n main_parser = argparse.ArgumentParser(\n description=\"A higher level API to BigML's API.\",\n epilog=\"Happy predictive modeling!\",\n version=version_text,\n formatter_class=argparse.RawTextHelpFormatter)\n subparsers = main_parser.add_subparsers()\n\n # list of options\n common_options = get_common_options(defaults=defaults, constants=constants)\n delete_options = get_delete_options(defaults=defaults)\n source_options = get_source_options(defaults=defaults)\n dataset_options = get_dataset_options(defaults=defaults)\n test_options = get_test_options(defaults=defaults)\n multi_label_options = get_multi_label_options(defaults=defaults)\n\n # subcommand options\n subcommand_options = {}\n # specific options\n subcommand_options[\"main\"] = get_main_options(defaults=defaults,\n constants=constants)\n # general options\n subcommand_options[\"main\"].update(common_options)\n subcommand_options[\"main\"].update(source_options)\n subcommand_options[\"main\"].update(dataset_options)\n subcommand_options[\"main\"].update(multi_label_options)\n subcommand_options[\"main\"].update(test_options)\n subcommand_options[\"main\"].update({\n '--source-tag': delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--model-tag': delete_options['--model-tag'],\n '--ensemble-tag': delete_options['--ensemble-tag'],\n '--prediction-tag': delete_options['--prediction-tag'],\n '--batch-prediction-tag': delete_options['--batch-prediction-tag']})\n\n main_options = subcommand_options[\"main\"]\n\n defaults = general_defaults[\"BigMLer analyze\"]\n subcommand_options[\"analyze\"] = get_analyze_options(defaults=defaults)\n subcommand_options[\"analyze\"].update(common_options)\n # we add the options that should be transmitted to bigmler main subcommands\n # in analyze\n subcommand_options[\"analyze\"].update({\n '--objective': main_options['--objective'],\n '--max-parallel-models': main_options['--max-parallel-models'],\n '--max-parallel-evaluations': main_options[\n '--max-parallel-evaluations'],\n '--model-fields': main_options['--model-fields'],\n '--balance': main_options['--balance'],\n '--no-balance': main_options['--no-balance'],\n '--number-of-models': main_options['--number-of-models'],\n '--sample-rate': main_options['--sample-rate'],\n '--replacement': main_options['--replacement'],\n '--missing-splits': main_options['--missing-splits'],\n '--pruning': main_options['--pruning'],\n '--weight-field': main_options['--weight-field'],\n '--replacement': main_options['--replacement'],\n '--objective-weights': main_options['--objective-weights'],\n '--replacement': main_options['--replacement'],\n '--model-attributes': main_options['--model-attributes'],\n '--ensemble-attributes': main_options['--ensemble-attributes'],\n '--tlp': main_options['--tlp'],\n '--randomize': main_options['--randomize']})\n\n defaults = general_defaults[\"BigMLer cluster\"]\n subcommand_options[\"cluster\"] = get_cluster_options(defaults=defaults)\n # general options\n subcommand_options[\"cluster\"].update(common_options)\n subcommand_options[\"cluster\"].update(source_options)\n subcommand_options[\"cluster\"].update(dataset_options)\n subcommand_options[\"cluster\"].update(test_options)\n subcommand_options[\"cluster\"].update({\n '--cpp': main_options['--cpp'],\n '--fields-map': main_options['--fields-map'],\n '--source-tag': 
delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--cluster-tag': delete_options['--cluster-tag'],\n '--centroid-tag': delete_options['--centroid-tag'],\n '--batch-centroid-tag': delete_options['--batch-centroid-tag'],\n '--prediction-info': main_options['--prediction-info'],\n '--prediction-header': main_options['--prediction-header'],\n '--prediction-fields': main_options['--prediction-fields'],\n '--reports': main_options['--reports'],\n '--remote': main_options['--remote'],\n '--no-batch': main_options['--no-batch']})\n\n defaults = general_defaults[\"BigMLer anomaly\"]\n subcommand_options[\"anomaly\"] = get_anomaly_options(defaults=defaults)\n # general options\n subcommand_options[\"anomaly\"].update(common_options)\n subcommand_options[\"anomaly\"].update(source_options)\n subcommand_options[\"anomaly\"].update(dataset_options)\n subcommand_options[\"anomaly\"].update(test_options)\n subcommand_options[\"anomaly\"].update({\n '--cpp': main_options['--cpp'],\n '--fields-map': main_options['--fields-map'],\n '--source-tag': delete_options['--source-tag'],\n '--dataset-tag': delete_options['--dataset-tag'],\n '--anomaly-tag': delete_options['--anomaly-tag'],\n '--anomaly-score-tag': delete_options['--anomaly-score-tag'],\n '--batch-anomaly-score-tag': delete_options['--batch-anomaly-score-tag'],\n '--prediction-info': main_options['--prediction-info'],\n '--prediction-header': main_options['--prediction-header'],\n '--prediction-fields': main_options['--prediction-fields'],\n '--reports': main_options['--reports'],\n '--remote': main_options['--remote'],\n '--no-batch': main_options['--no-batch']})\n\n\n subcommand_options[\"delete\"] = delete_options\n subcommand_options[\"delete\"].update(common_options)\n\n for subcommand in SUBCOMMANDS:\n subparser = subparsers.add_parser(subcommand)\n parser_add_options(subparser, subcommand_options[subcommand])\n\n # options to be transmitted from analyze to main\n chained_options = [\n \"--debug\", \"--dev\", \"--username\", \"--api-key\", \"--resources-log\",\n \"--store\", \"--clear-logs\", \"--max-parallel-models\",\n \"--max-parallel-evaluations\", \"--objective\", \"--tag\",\n \"--no-tag\", \"--no-debug\", \"--no-dev\", \"--model-fields\", \"--balance\",\n \"--verbosity\", \"--resume\", \"--stack_level\", \"--no-balance\",\n \"--args-separator\", \"--name\"]\n\n return main_parser, chained_options", "def add_rm_parser(subparsers):\n rm_parser = subparsers.add_parser(\"rm\")\n rm_parser.set_defaults(func=rm_cli.main)\n rm_parser.add_argument('--scenario', '-s',\n dest=\"scenario\",\n help='Predefined scenario to use for exection')\n rm_parser.add_argument('--platform', dest=\"platform\",\n help=\"The platform to use \\\n(podman, docker, terraform, shell, python)\")\n rm_parser.add_argument('--vars', dest=\"vars\",\n default=\"\",\n help=\"extra variables\")\n rm_parser.add_argument('--debug', dest=\"debug\",\n action=\"store_true\",\n help=\"Enable debug level logging\")", "def generate_main_parser() -> ArgumentParser:\n # Create parser\n parser = ArgumentParser(\n description=\"Command line interface tool for iic2343.\",\n )\n\n # Add version flag\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"iic2343 version {iic2343.__version__}\",\n )\n\n # Create subparsers\n subparsers = parser.add_subparsers(help=\"Action to be executed.\")\n\n # Serial ports subparser\n generate_serial_ports_subparser(subparsers)\n\n return parser", "def subdivide_parser(subparser):\n parser 
= subparser.add_parser('subdivide',\\\n help='will subdivide a mesh by one iteration '\\\n '(splitting all triangles in four others)'\\\n ' deprecated use converter.py refine instead')\n parser = add_arg(parser, 'replace')\n parser.add_argument(\"args\", metavar='SELAFIN file', nargs=\"+\")\n\n return subparser", "def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )", "def add_command(subparsers):\n\n parser = subparsers.add_parser('create', help=create.__doc__)\n\n parser.add_argument('-r', '--recreate', action='store_true', help='If set, I\\'ll first erase the current database')\n parser.add_argument('-v', '--verbose', action='count', help='Increase verbosity?')\n parser.add_argument('-d', '--image-dir', default='/idiap/project/hface/databases/polimetric_thermal_database/Registered/', help=\"Change the relative path to the directory containing the images of the Polarimetric database.\")\n\n parser.set_defaults(func=create) #action", "def _spawn_cli() -> GooeyParser:\n parser = _add_gui_arg(_cli())\n\n return vars(parser.parse_args())", "def _cli() -> GooeyParser:\n parser = GooeyParser(description='A template CLI app with optional GUI.')\n\n return parser", "def get_parser(subparsers):\n parser = subparsers.add_parser(\"compile\", description=\"Compile model using ML on MCU flow.\")\n parser.set_defaults(flow_func=handle)\n add_compile_options(parser)\n return parser", "def internal_setup_options_parser(self, argparser):\n self.setup_options_parser(argparser)\n subparser = argparser.add_subparsers(dest='verb')\n for (name, verb) in self.verbs.items():\n vparser = subparser.add_parser(name, help=verb.help)\n verb.setup_options_parser(vparser)", "def __init__(self, *args, **kwargs):\n # The subcommand array, with the help command included.\n self.subcommands = list(kwargs.pop('subcommands', []))\n self.subcommands.append(self._HelpSubcommand)\n\n # A more helpful default usage.\n if 'usage' not in kwargs:\n kwargs['usage'] = \"\"\"\n %prog [global options] COMMAND [ARGS...]\n %prog help COMMAND\"\"\"\n\n # Super constructor.\n optparse.OptionParser.__init__(self, *args, **kwargs)\n\n # Adjust the help-visible name of each subcommand.\n for subcommand in self.subcommands:\n subcommand.parser.prog = '%s %s' % \\\n (self.get_prog_name(), subcommand.name)\n\n # Our root parser needs to stop on the first unrecognized argument.\n self.disable_interspersed_args()", "def add_arguments(self, sub_parser):\n sp = sub_parser\n sp.add_argument('--fork', nargs='?')", "def get_parser(subparsers):\n parser = subparsers.add_parser(\"models\", description=\"Manage ML on MCU models.\")\n parser.set_defaults(func=handle)\n add_common_options(parser)\n add_context_options(parser)\n add_models_options(parser)\n return parser", "def add_command_parsers(parser, logparser):\n subparsers = parser.add_subparsers(metavar='Command')\n help_text = 'ONE OF THE FOLLOWING:\\n'\n available_commands = find_commands(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'functions'))\n max_length = max([len(a) for a in available_commands])\n for command in available_commands:\n child_parser = subparsers.add_parser(command, parents=[logparser])\n call = importlib.import_module('functions.%s'% command)\n if hasattr(call, 'set_argparser'):\n 
call.set_argparser(child_parser)\n else:\n child_parser.description = 'Description is missing'\n help_text += command + \": \" + \" \"*(max_length-len(command)) + ('\\n'+' '*(max_length+2)\n ).join(textwrap.wrap(child_parser.description,60)) + '\\n'\n child_parser.set_defaults(func=call.main)\n subparsers.help = help_text + '\\nType \"Command --help\" for more information about given command'", "def make_parser():\n\n parser = argparse.ArgumentParser(description='Inference engine.')\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n subparsers.required = True\n solver_subparser = subparsers.add_parser('run')\n solver_subparser.add_argument(\n '-v', '--verbose', help='enable verbose mode.', action='store_true'\n )\n solver_subparser.add_argument(\n '-d', '--debug', help='enable debug mode.', action='store_true'\n )\n solver_subparser.add_argument(\n 'filename', type=str,\n help='filename containing the instructions to process.'\n )\n return parser", "def get_argument_parser(self):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n fetch_parser = subparsers.add_parser('fetch', help='fetches and displays a release from discogs')\n fetch_parser.add_argument('discogs_id', help='the ID of the release')\n rip_parser = subparsers.add_parser('rip', help='rips the current CD to WAV')\n rip_parser.add_argument('--destination', help='optional destination for the CD rip')\n search_parser = subparsers.add_parser(\n 'search',\n prog='search',\n help='performs a very simple search on discogs')\n search_parser.add_argument('term', help='the term to search for')\n encode_parser = subparsers.add_parser(\n 'encode', help='Encodes a CD or a set of WAV files to mp3.')\n encode_parser.add_argument(\n 'encoding_from', choices=['cd', 'wav'], help='The source to encode from.')\n encode_parser.add_argument(\n 'encoding_to', choices=['mp3', 'flac'], help='The destination to encode to.')\n encode_parser.add_argument(\n '--source', help='The destination of the source wav file. This can be a file or directory.')\n encode_parser.add_argument(\n '--destination', help='The destination of the resulting mp3 or flac. This can be a file or directory.')\n encode_parser.add_argument(\n '--keep-source', action='store_true', help='If encoding from wav, use this to keep the original wav being removed.')\n encode_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n encode_parser.add_argument(\n '--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the encoded files.')\n decode_parser = subparsers.add_parser('decode', help='Decodes a set of FLAC or MP3 files to WAV.')\n decode_parser.add_argument(\n 'decode_from', choices=['flac', 'mp3'], help='The source to decode from.')\n decode_parser.add_argument(\n '--source', help='The destination of the source file. This can be a file or directory.')\n decode_parser.add_argument(\n '--destination', help='The destination of the resulting wav. This can be a file or directory.')\n tag_parser = subparsers.add_parser('tag', help='Tags an audio file')\n tag_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The tagging action to be performed. 
A tag can be added or removed.')\n tag_parser.add_argument(\n 'format', choices=['mp3', 'flac'], help='The file format of the audio file being tagged.')\n tag_parser.add_argument(\n '--collapse-index-tracks', action='store_true', help='If set this will collapse any subtracks to a single track.')\n tag_parser.add_argument(\n '--source',\n help='The source audio files to tag. This can be a file or a directory. If the source is omitted, the files in the current working directory will be used.')\n tag_parser.add_argument('--discogs-id', help='The discogs ID for the release. When this is used metadata from the discogs release will be applied to the tagged files.')\n tag_parser.add_argument('--artist', help='The artist to use for the tag.')\n tag_parser.add_argument('--album-artist', help='The album artist to use for the tag.')\n tag_parser.add_argument('--album', help='The album to use for the tag.')\n tag_parser.add_argument('--title', help='The title to use for the tag.')\n tag_parser.add_argument('--year', help='The year to use for the tag.')\n tag_parser.add_argument('--genre', help='The year to use for the tag.')\n tag_parser.add_argument('--track-number', help='The track number to use for the tag.')\n tag_parser.add_argument('--track-total', help='The track total to use for the tag.')\n tag_parser.add_argument('--disc-number', help='The disc number to use for the tag.')\n tag_parser.add_argument('--disc-total', help='The disc total to use for the tag.')\n tag_parser.add_argument('--comment', help='The comment for the tag.')\n artwork_parser = subparsers.add_parser('artwork', help='adds or removes artwork from a file')\n artwork_parser.add_argument(\n 'action', choices=['add', 'remove'], help='The artwork action to be performed. The artwork can be added or removed.')\n artwork_parser.add_argument(\n 'type', choices=['mp3', 'flac'], help='The type of file to apply the artwork to.')\n artwork_parser.add_argument(\n '--source', help='The destination file or directory to apply the artwork to. If there is no source then any artwork in the current directory will be used.')\n artwork_parser.add_argument(\n '--destination', help='The destination file or directory to apply the artwork to. 
If there is no destination then the current directory will be used.')\n mix_parser = subparsers.add_parser('mix', help='adds a mix')\n mix_parser.add_argument('source', help='the source of the mix')\n mix_parser.add_argument('--artist', help='The artist to use for the tag.')\n mix_parser.add_argument('--album', help='The album to use for the mix.')\n mix_parser.add_argument('--title', help='The title to use for the mix.')\n mix_parser.add_argument('--year', help='The year to use for the mix.')\n mix_parser.add_argument('--comment', help='The comment for the mix.')\n return parser", "def add_generate_token_subcommand(\n subparsers: Any,\n) -> None:\n generate_token_sp = subparsers.add_parser(\n \"generate-token\",\n formatter_class=Formatter,\n description=dedent( # noqa: WPS462 -- docs\n \"\"\"\\\n Generate token.\n\n Token is required to consume the protected endpoints.\n\n Example:\n ```shell\n # Generate a rsa key pair\n openssl genpkey -algorithm RSA -out private_key.pem \\\\\n -pkeyopt rsa_keygen_bits:2048\n openssl rsa -pubout -in private_key.pem -out public_key.pem\n # Generate token\n bartender generate-token --format header > token.txt\n # Use token\n curl -X 'GET' \\\\\n 'http://127.0.0.1:8000/api/whoami' \\\\\n -H 'accept: application/json' \\\\\n -H @token.txt | jq .\n ```\n \"\"\",\n ),\n help=\"Generate token.\",\n )\n generate_token_sp.add_argument(\n \"--private-key\",\n default=Path(\"private_key.pem\"),\n type=Path,\n help=\"Path to RSA private key file\",\n )\n generate_token_sp.add_argument(\n \"--username\",\n default=\"someone\",\n help=\"Username to use in token\",\n )\n generate_token_sp.add_argument(\n \"--roles\",\n nargs=\"+\",\n default=[\"expert\", \"guru\"],\n help=\"Roles to use in token\",\n )\n onehour_in_minutes = 60\n generate_token_sp.add_argument(\n \"--lifetime\",\n default=onehour_in_minutes,\n type=int,\n help=\"Lifetime of token in minutes\",\n )\n generate_token_sp.add_argument(\n \"--issuer\",\n default=\"bartendercli\",\n help=\"Issuer of token\",\n )\n generate_token_sp.add_argument(\n \"--oformat\",\n default=\"plain\",\n choices=[\"header\", \"plain\"],\n help=\"Format of output\",\n )\n generate_token_sp.set_defaults(func=generate_token_subcommand)", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def setup_argparse():\n prsr = argparse.ArgumentParser( description=__description__,\n version=__version__ )\n subprsrs = prsr.add_subparsers(dest='subparser_name')\n \n sub_export = subprsrs.add_parser(prsr_EXPORT,description=prsr_EXPORT_desc,\n 
help=prsr_EXPORT_help)\n sub_update = subprsrs.add_parser(prsr_UPDATE, description=prsr_UPDATE_desc,\n help=prsr_UPDATE_help)\n sub_script= subprsrs.add_parser(prsr_SCRIPT,description=prsr_SCRIPT_desc,\n help=prsr_SCRIPT_help) \n \n # toplevel options \n # --debug turn on debug logging.\n # --warnings let warnings be logged too, errors are the only thing logged by default.\n # --logfile set the path to the log file.\n prsr.add_argument('--debug', action='store_true',help=\"Turn on debug logging.\",dest='log_debug')\n prsr.add_argument('--warnings',action='store_true',help=\"Let warnings be logged too, errors are the only thing logged by default.\",dest='log_warnings')\n prsr.add_argument('--logfile', default=\"errors.log\",help=\"Set the path to the log file.\",dest='log_path')\n prsr.add_argument('--save', metavar=\"path\",help=\"Set the path to where the current query can be saved.\",dest='script_save_path')\n prsr.add_argument('--time', action='store_true',help=\"LiVSs will report how long it took to run your command.\",dest='run_timer')\n \n #export subcommand\n # input the file/directory that should be the target of an export\n # output the file/directory that should be the location of said export\n #\n # Subcommands of Export\n # util options for dumping utility files from the input target\n # translator options for dumping a translator file from the input target\n #\n esubprsrs = sub_export.add_subparsers(dest='subsubparser_name')\n util_sub_parse = esubprsrs.add_parser(subprsr_UTIL, description=subprsr_UTIL_desc, \n help=subprsr_UTIL_help)\n translator_sub_parse = esubprsrs.add_parser(subprsr_TRANSLATOR,description=subprsr_TRANSLATOR_desc,\n help=subprsr_TRANSLATOR_help)\n sub_export.add_argument('-l','--level', choices=['sys','proj','lang'], help=\"set the level to build the export to, read the HOWTO if you don't know.\",dest='export_level')\n sub_export.add_argument('--mem', action='store_true', help=\"Keep everything in memory and only saves the level specified, generates new files.\",dest='export_mem')\n sub_export.add_argument('--keep', action='store_true', help=\"Use what was currently generated in the input path, don't generate new files.\",dest='export_existing')\n sub_export.add_argument('input', help='the file/directory that should be the target of an export')\n sub_export.add_argument('output', help='the file/directory that should be the location of said export')\n \n # export util subcommand\n # -m, --menus export the menus out of the target\n # -d, --dialogs export the dialogs out of a target\n # -s, --strings export the string tables out of a target\n # -a, --all export all utils from a target (output must be a directory)\n util_sub_parse.add_argument('-m','--menus', action='store_true', help='export the menus out of the target',dest='export_util_menus')\n util_sub_parse.add_argument('-d','--dialogs', action='store_true', help='export the dialogs out of a target',dest='export_util_dialogs')\n util_sub_parse.add_argument('-s','--strings', action='store_true', help='export the string tables out of a target',dest='export_util_strings')\n util_sub_parse.add_argument('-a','--all', action='store_true', help='export all utils from a target (output must be a directory)',dest='export_util_all')\n \n # export translator subcommand\n # -s, --sort Sort the list of strings alphabetically\n # -c, --langcodes Provide a list of language codes you want in the translator file\n # -p, --prune Takes a path to a file that has a list of strings that shouldn't be in the translation file\n 
translator_sub_parse.add_argument('-s','--sort', action='store_true', help=\"Sort the list of strings alphabetically\", dest='export_translator_order')\n translator_sub_parse.add_argument('-c','--langcodes', metavar='code', nargs='+', help=\"Provide a list of language codes you want in the translator file\",dest='export_translator_langcodes')\n translator_sub_parse.add_argument('-p','--prune', metavar='path', nargs=1, help=\"Takes a path to a file that has a list of strings that shouldn't be in the translation file\",dest='export_translator_prunepath')\n translator_sub_parse.add_argument('-m','--mark', action='store_true', help=\"Highlights possible conflicts in the new translation file\", dest='export_translator_markconflicts')\n\n #update subcommand\n # -t, --translator Location of input translator file\n # -d, --dialogs Location of input dialogs file\n # -m, --menus Location of input menus file\n # -s, --strings Location of input string table file\n # -c, --langcode Limit what lang code in the input file gets updated, this can be a list.\n # -o, --outtype What should we be updating? (sys, proj, rcs)\n # -n, --new Generates new files for whatever outtype is set to (cannot be rcs).\n # output The directory/file to update\n locations = sub_update.add_mutually_exclusive_group(required=True)\n locations.add_argument('-t','--translator',metavar='path',help='Location of input translator file',dest='update_translator')\n locations.add_argument('-d','--dialogs',metavar='path',help='Location of input dialogs file',dest='update_dialogs')\n locations.add_argument('-m','--menus',metavar='path',help='Location of input menus file',dest='update_menus')\n locations.add_argument('-s','--strings',metavar='path',help='Location of input string table file',dest='update_strings')\n sub_update.add_argument('-c','--langcode', metavar='code', nargs='+', help=\"Limit what lang code in the input file gets updated, this can be a list.\", dest='update_langcodes')\n sub_update.add_argument('-o','--outtype', metavar='type', choices=['sys','proj','rcs'], help='What should we be updating?', dest='update_to')\n sub_update.add_argument('-n','--new',action='store_true', help='Generates new files for whatever outtype is set to (cannot be rcs).', dest='update_new')\n sub_update.add_argument('output',help='The directory/file to update')\n \n #script subcommand\n # --dump instead of reading in a file, all default configurations are dumped to the given file\n # path takes a path to a file to run it. 
For more information on scripting, see the howto file.\n sub_script.add_argument(\"--dump\", action=\"store_true\", help=\"instead of reading in a file, all default configurations are dumped to the given file\")\n sub_script.add_argument(\"input\", help=\"takes a path to a headshot script to run it.\")\n \n return prsr", "def __init__(self, subparsers, region_arg: bool = True, expects_extra_args: bool = False, **argparse_kwargs):\n parser_name = argparse_kwargs.pop(\"name\")\n parser = subparsers.add_parser(parser_name, **argparse_kwargs)\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Turn on debug logging.\", default=False)\n if region_arg:\n parser.add_argument(\"-r\", \"--region\", help=\"AWS Region this operation corresponds to.\")\n self.register_command_args(parser)\n parser.set_defaults(func=self.execute, expects_extra_args=expects_extra_args)", "def make_cli_parser(self):\n super(BplnArgParser, self).make_cli_parser()\n links_opt = self.cli_parser.add_option('--selected-links',\n help=(\"A CSV-formatted file containing pairs of \"\n \"terms to test. Tests will be done to decide \"\n \"if the annotation term from the first column \"\n \"\\\"is linked to\\\" the annotation term from the \"\n \"second column. [NOTE: Selecting this option \"\n \"restricts the program to only test the matches \"\n \"designated in the file.] [NOTE: This option \"\n \"conflicts with '--selected-terms' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_opt = self.cli_parser.add_option('--selected-terms',\n help=(\"A file containing annotation terms to test \"\n \"linkage to each other. The file should contain one \"\n \"term per line. Selecting this option restricts the \"\n \"program to only testing the given terms against \"\n \"each other. [NOTE: This option conflicts with \"\n \"'--selected-links' and \"\n \"'--selected-terms-with-all'.]\"\n )\n )\n anns_all_opt = self.cli_parser.add_option(\n '--selected-terms-with-all',\n help=(\"A file containing annotation terms to test \"\n \"linkage to all other terms (one-against-all and \"\n \"all-against-one). The file should contain one \"\n \"term per line. Selecting this option restricts \"\n \"the program to only testing the given terms \"\n \"against all other terms. [NOTE: \"\n \"This option conflicts with '--selected-links' and \"\n \"'--selected-terms'.]\"\n )\n )\n self.cli_parser.register_conflict(\n (links_opt, anns_opt, anns_all_opt))" ]
[ "0.8356323", "0.7613267", "0.7532559", "0.75132465", "0.74843043", "0.7461825", "0.7458073", "0.7289405", "0.71724516", "0.7165431", "0.7135265", "0.7031047", "0.6956534", "0.6943993", "0.69056773", "0.688262", "0.68790203", "0.6860334", "0.6815078", "0.67114484", "0.67108387", "0.6692973", "0.6692194", "0.66842544", "0.66556716", "0.664244", "0.6632538", "0.65947324", "0.65842444", "0.6562371", "0.6539486", "0.6503882", "0.6503623", "0.64993274", "0.64651495", "0.6452243", "0.64471865", "0.6407968", "0.6396124", "0.6394142", "0.63911366", "0.6344063", "0.63273406", "0.632696", "0.6318256", "0.6306399", "0.6293927", "0.6256494", "0.6256343", "0.62277085", "0.6218185", "0.6210251", "0.618248", "0.61797833", "0.6179422", "0.61605614", "0.6159979", "0.6145137", "0.61332655", "0.6131321", "0.61182", "0.61054975", "0.61049104", "0.6095218", "0.6093803", "0.6089879", "0.60774523", "0.6077219", "0.6074431", "0.60508054", "0.6045958", "0.60451156", "0.6044384", "0.6042533", "0.6039149", "0.60386866", "0.6023419", "0.60221237", "0.6020217", "0.6008185", "0.60007936", "0.59925944", "0.59884405", "0.59877557", "0.5985673", "0.59796655", "0.59777594", "0.5974108", "0.59698445", "0.5966211", "0.59546626", "0.5953078", "0.5947242", "0.5946412", "0.59445643", "0.59414375", "0.59327054", "0.5929507", "0.5917773", "0.5912016" ]
0.5897082
100
Check that all protected branches are a single index unicity of name version must be increased `force` option ignored
def check_pkg_consistency(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def validate(cfg: defs.Config) -> List[str]: # noqa: C901\n res: List[str] = []\n\n def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n \"\"\"Validate versions within a single branch.\"\"\"\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")\n\n def check_component(comp_name: str, comp: defs.Component) -> None:\n \"\"\"Validate the definition of a single component.\"\"\"\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)\n\n for comp_name, comp in sorted(cfg.all_components.components.items()):\n check_component(comp_name, comp)\n\n return res", "def needs_patch():\n return (IndexName is not None and\n hasattr(BaseDatabaseSchemaEditor, '_create_unique_sql'))", "def check_fast_forward():\n\n if oldrev != zero: # ignore new branches\n cmd = ['git', 'rev-list', newrev + '..' 
+ oldrev]\n missed_refs = call(cmd)\n missed_ref_count = len(missed_refs)\n if missed_ref_count > 0:\n add_error(\n \"[POLICY] Non fast-forward updates are not allowed for this branch\")", "def version_update_has_wrong_order(versions, new_versions):\n branches = find_branches(versions)\n if not branches:\n return False\n\n tip = branches[-1]\n\n def find_branch_for_version(v):\n for branch in branches:\n if on_same_branch(branch, v):\n return branch\n return tip\n\n chunks = groupby(new_versions, find_branch_for_version)\n chunks = [[key] + list(values)\n for key, values\n in chunks]\n\n some_chunk_has_hole = any(map(has_hole, chunks))\n return some_chunk_has_hole", "def checkConflicts(self):\n\t\treturn", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def _get_index_if_valid(self, course_key, force=False):\n if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:\n return None\n else:\n index_entry = self.get_course_index(course_key)\n is_head = (\n course_key.version_guid is None or\n index_entry['versions'][course_key.branch] == course_key.version_guid\n )\n if is_head or force:\n return index_entry\n else:\n raise VersionConflictError(\n course_key,\n index_entry['versions'][course_key.branch]\n )", "def ensure_index(cls, *a, **ka):\n cls._dbag.ensure_index(*a, **ka)", "def resolve_conflicts(self, commit=True):\n pass # pragma: no cover", "def enforce_clean_option(args, run):\n repos = run.experiment_info[\"repositories\"]\n if not repos:\n raise RuntimeError(\n \"No version control detected. 
\"\n \"Cannot enforce clean repository.\\n\"\n \"Make sure that your sources under VCS and the \"\n \"corresponding python package is installed.\"\n )\n else:\n for repo in repos:\n if repo[\"dirty\"]:\n raise RuntimeError(\n \"EnforceClean: Uncommited changes in \"\n 'the \"{}\" repository.'.format(repo)\n )", "def __gitCheckPatches(self):\n self.vcs.gitApplyCheckPatches(self.project.getProjectPath(),\n check=True)", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def check_heads(repo, their_heads, context):\n heads = repo.heads()\n heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()\n if not (\n their_heads == [b'force']\n or their_heads == heads\n or their_heads == [b'hashed', heads_hash]\n ):\n # someone else committed/pushed/unbundled while we\n # were transferring data\n raise error.PushRaced(\n b'repository changed while %s - please try again' % context\n )", "def _does_require_force_update(self):\n\n if self.current_version[0][0] > self.version_yaml[0]:\n # The current version first index is greater than the one we have in the\n # current version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to force the update for\n # the current version number.\n return False", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def test_resetcherrypick_resets_index(repository: Repository, path: Path) -> None:\n createconflict(repository, path, ours=\"a\", theirs=\"b\")\n\n repository.resetcherrypick()\n\n index = repository._repository.index\n assert index.write_tree() == repository.head.commit.tree.id", "def check_fast_forward(self, branch):\n proc = run_cmd(self.git + ['rev-list', '%s-tmp..%s' %\n (branch, branch), '--'])\n if proc.stdout.strip():\n # Commits have been made on the main branch since the last update\n # command.\n raise EmtError('cannot fast-forward the %s branch, please '\n 'run again the update command' % branch)", "def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 
0", "def index_ddl_if_exists(self):\n return exclusions.closed()", "def _prepare_manual_resolve(self):\n # Files that have been deleted between branch and cherry-pick will not have\n # their skip-worktree bit set so set it manually for those files to avoid\n # git status incorrectly listing them as unstaged deletes.\n repo_status = self._run_git_command(\n ['-c', 'core.quotePath=false', 'status', '--porcelain']).splitlines()\n extra_files = [f[3:] for f in repo_status if f[:2] == ' D']\n if extra_files:\n self._run_git_command_with_stdin(\n ['update-index', '--skip-worktree', '--stdin'],\n stdin='\\n'.join(extra_files) + '\\n')", "def validate_shared_index_options(options):\n \n if options.vcf_phasing:\n require(all([vcf.endswith('.vcf.gz') for vcf in options.vcf_phasing]),\n 'input phasing files must end with .vcf.gz')\n if 'gbwt' in options.indexes:\n require(len(options.vcf_phasing) > 0, 'generating a GBWT requires a VCF with phasing information')\n if options.gbwt_prune:\n require(('gbwt' in options.indexes) or options.gbwt_input, '--gbwt_index or --gbwt_input required for --gbwt_prune')\n if options.vcf_phasing_regions:\n require('gbwt' in options.indexes, \"cannot hint regions to GBWT indexer without building a GBWT index\")", "def replace_old(self):\n # Tell the client the index is ready to be used and to no longer show it as building.\n self.db.indexes.find_one_and_update({\"_id\": self.params[\"index_id\"]}, {\n \"$set\": {\n \"ready\": True\n }\n })\n\n self.dispatch(\"indexes\", \"update\", [self.params[\"index_id\"]])\n\n active_indexes = virtool.db.sync.get_active_index_ids(self.db, self.params[\"ref_id\"])\n\n remove_unused_index_files(self.params[\"reference_path\"], active_indexes)\n\n query = {\n \"_id\": {\n \"$not\": {\n \"$in\": active_indexes\n }\n }\n }\n\n self.db.indexes.update_many(query, {\n \"$set\": {\n \"has_files\": False\n }\n })\n\n id_list = self.db.indexes.distinct(\"_id\", query)\n\n self.dispatch(\"indexes\", \"update\", id_list)\n\n # Find OTUs with changes.\n pipeline = [\n {\"$project\": {\n \"reference\": True,\n \"version\": True,\n \"last_indexed_version\": True,\n \"comp\": {\"$cmp\": [\"$version\", \"$last_indexed_version\"]}\n }},\n {\"$match\": {\n \"reference.id\": self.params[\"ref_id\"],\n \"comp\": {\"$ne\": 0}\n }},\n {\"$group\": {\n \"_id\": \"$version\",\n \"id_list\": {\n \"$addToSet\": \"$_id\"\n }\n }}\n ]\n\n id_version_key = {agg[\"_id\"]: agg[\"id_list\"] for agg in self.db.otus.aggregate(pipeline)}\n\n # For each version number\n for version, id_list in id_version_key.items():\n self.db.otus.update_many({\"_id\": {\"$in\": id_list}}, {\n \"$set\": {\n \"last_indexed_version\": version\n }\n })", "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n 
self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def get_all_changes_for_package(\n provider_package_id: str,\n verbose: bool,\n base_branch: str,\n force: bool,\n) -> tuple[bool, list[list[Change]] | Change | None, str]:\n provider_details = get_provider_details(provider_package_id)\n current_version = provider_details.versions[0]\n current_tag_no_suffix = get_version_tag(current_version, provider_package_id)\n if verbose:\n console.print(f\"Checking if tag '{current_tag_no_suffix}' exist.\")\n if not force and not subprocess.call(\n get_git_tag_check_command(current_tag_no_suffix),\n cwd=provider_details.source_provider_package_path,\n stderr=subprocess.DEVNULL,\n ):\n if verbose:\n console.print(f\"The tag {current_tag_no_suffix} exists.\")\n # The tag already exists\n changes = subprocess.check_output(\n get_git_log_command(verbose, f\"{HEAD_OF_HTTPS_REMOTE}/{base_branch}\", current_tag_no_suffix),\n cwd=provider_details.source_provider_package_path,\n text=True,\n )\n if changes:\n provider_details = get_provider_details(provider_package_id)\n doc_only_change_file = os.path.join(\n provider_details.source_provider_package_path, \".latest-doc-only-change.txt\"\n )\n if os.path.exists(doc_only_change_file):\n with open(doc_only_change_file) as f:\n last_doc_only_hash = f.read().strip()\n try:\n changes_since_last_doc_only_check = subprocess.check_output(\n get_git_log_command(\n verbose, f\"{HEAD_OF_HTTPS_REMOTE}/{base_branch}\", last_doc_only_hash\n ),\n cwd=provider_details.source_provider_package_path,\n text=True,\n )\n if not changes_since_last_doc_only_check:\n console.print()\n console.print(\n \"[yellow]The provider has doc-only changes since the last release. Skipping[/]\"\n )\n # Returns 66 in case of doc-only changes\n sys.exit(66)\n if len(changes) > len(changes_since_last_doc_only_check):\n # if doc-only was released after previous release - use it as starting point\n # but if before - stay with the releases from last tag.\n changes = changes_since_last_doc_only_check\n except subprocess.CalledProcessError:\n # ignore when the commit mentioned as last doc-only change is obsolete\n pass\n\n console.print(f\"[yellow]The provider {provider_package_id} has changes since last release[/]\")\n console.print()\n console.print(f\"[bright_blue]Provider: {provider_package_id}[/]\\n\")\n changes_table, array_of_changes = convert_git_changes_to_table(\n f\"NEXT VERSION AFTER + {provider_details.versions[0]}\",\n changes,\n base_url=\"https://github.com/apache/airflow/commit/\",\n markdown=False,\n )\n print_changes_table(changes_table)\n return False, array_of_changes[0], changes_table\n else:\n console.print(f\"No changes for {provider_package_id}\")\n return False, None, \"\"\n if verbose:\n console.print(\"The tag does not exist. 
\")\n if len(provider_details.versions) == 1:\n console.print(\n f\"The provider '{provider_package_id}' has never been released but it is ready to release!\\n\"\n )\n else:\n console.print(f\"New version of the '{provider_package_id}' package is ready to be released!\\n\")\n next_version_tag = f\"{HEAD_OF_HTTPS_REMOTE}/{base_branch}\"\n changes_table = \"\"\n current_version = provider_details.versions[0]\n list_of_list_of_changes: list[list[Change]] = []\n for version in provider_details.versions[1:]:\n version_tag = get_version_tag(version, provider_package_id)\n changes = subprocess.check_output(\n get_git_log_command(verbose, next_version_tag, version_tag),\n cwd=provider_details.source_provider_package_path,\n text=True,\n )\n changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table(\n current_version, changes, base_url=\"https://github.com/apache/airflow/commit/\", markdown=False\n )\n changes_table += changes_table_for_version\n list_of_list_of_changes.append(array_of_changes_for_version)\n next_version_tag = version_tag\n current_version = version\n changes = subprocess.check_output(\n get_git_log_command(verbose, next_version_tag),\n cwd=provider_details.source_provider_package_path,\n text=True,\n )\n changes_table_for_version, array_of_changes_for_version = convert_git_changes_to_table(\n current_version, changes, base_url=\"https://github.com/apache/airflow/commit/\", markdown=False\n )\n changes_table += changes_table_for_version\n if verbose:\n print_changes_table(changes_table)\n return True, list_of_list_of_changes or None, changes_table", "def _check_integrity(self):\n try:\n for q in self:\n pass\n except TreeIntegrityError:\n raise ValueError('Invalid \"tree\" parameter.')", "def check_consistency(self, es):", "def verify_branch(root, index, proof, output_as_int=False):\n index = get_index_in_permuted(index, 2**len(proof) // 2)\n # I think this is a bug. 
Should be 2**len(proof) // 2\n # But I think it's OK since we're only doing parity checks\n #index += 2**len(proof)\n index += 2**len(proof) // 2\n v = proof[0]\n for p in proof[1:]:\n if index % 2:\n v = blake(p + v)\n else:\n v = blake(v + p)\n index //= 2\n assert v == root\n return int.from_bytes(proof[0], 'big') if output_as_int else proof[0]", "def flag_all_commit(self):\n\t\tfor k in self.data.keys():\n\t\t\tindex = 0\n\t\t\tfor item in self[k]:\n\t\t\t\tself.data[k][index]['meta']['needs_commit'] = True\n\t\t\t\tindex += 1", "def checkConflicts(self):\n\t\tapDisplay.printError(\"you did not create a 'checkConflicts' function in your script\")\n\t\traise NotImplementedError()", "def svn_fs_check_related(*args):\r\n return _fs.svn_fs_check_related(*args)", "def test_heads_create_existing_branch_force(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n branch = heads.create(\"branch\", head.commit)\n updatefile(repository.path / \"a\")\n heads.create(branch.name, head.commit, force=True)\n assert head.commit == branch.commit", "def _get_index_if_valid(self, locator, force=False, continue_version=False):\r\n if locator.org is None or locator.offering is None or locator.branch is None:\r\n if continue_version:\r\n raise InsufficientSpecificationError(\r\n \"To continue a version, the locator must point to one ({}).\".format(locator)\r\n )\r\n else:\r\n return None\r\n else:\r\n index_entry = self.db_connection.get_course_index(locator)\r\n is_head = (\r\n locator.version_guid is None or\r\n index_entry['versions'][locator.branch] == locator.version_guid\r\n )\r\n if (is_head or (force and not continue_version)):\r\n return index_entry\r\n else:\r\n raise VersionConflictError(\r\n locator,\r\n index_entry['versions'][locator.branch]\r\n )", "def commit(self, force=False):\n if len(self.changed) > 0 or force: \n self.things_obj.set_all(self.things_dict, self.parent)\n logging.error('commited!!!!!!!!')", "def __checkFeatureIndex__(self, index, indexes):\n if index is not False:\n indexes.append(index)", "def unprotect(args):\n\n for branch in args.branch:\n logging.info(\"Unprotecting %s...\", branch)\n #pylint: disable=no-value-for-parameter, cell-var-from-loop\n manage_repos(args, lambda repo, _: repo.unprotect(branch))", "def test_heads_contains_true(repository: Repository) -> None:\n assert repository.head.name in repository.heads", "def rebuild_index(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n with get_db_connection() as db:\n c = db.cursor()\n execute_with_retry(db, c, self._rebuild_index)", "def verify_index(yaml_data, build_index=True):\n print('='*10, 'Verifying Index', '='*10)\n index_utils_command = [\n os.path.join(yaml_data['root'], yaml_data['index_utils_command']),\n '-index', os.path.join(yaml_data['index_root'] if yaml_data['index_root'] else '',\n 'lucene-index.{0}.pos+docvectors{1}'.format(yaml_data['name'], '+rawdocs' if 'storeRawdocs' in yaml_data['index_options'] else ''))\n if build_index else yaml_data['index_path'],\n '-stats'\n ]\n out = check_output(' '.join(index_utils_command)).decode('utf-8').split('\\n')\n for line in out:\n stat = line.split(':')[0]\n if stat in yaml_data['index_stats']:\n value = int(line.split(':')[1])\n assert value == yaml_data['index_stats'][stat]\n print(line)\n print(OKBLUE, '='*10, 'Verifying Index Succeed', '='*10, ENDC)", "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n 
assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def test_verify_recursive_and_transverse_acl_options():", "def test_force_and_update(self, _get_repo_name, # pylint: disable=unused-argument\n set_version_from_git_tag_mock):\n force_semver_git_tag_plugin(self.project, self.logger)\n self.project.set_property('semver_git_tag_increment_part', 'minor')\n update_version_from_git_tag(self.project, self.logger)\n self.assertEqual(set_version_from_git_tag_mock.call_count, 2)\n self.assertEqual(self.logger.info.call_count, 2)\n self.logger.warn.assert_called_once_with(\n \"Property `semver_git_tag_increment_part` was changed. \"\n \"For better compatibility recommended to use \"\n \"command line `pyb ... -P semver_git_tag_increment_part=...`, \"\n \"otherwise some version-related properties could \"\n \"be spoiled.\"\n )", "def commit():\n return _run_indexer_func(\"commit\")", "def createVersions(self, force = False):\n\t\tlow = self.makeLow(force)\n\t\tlow.makeTx(force)\n\t\tmid = self.makeMid(force)\n\t\tmid.makeTx(force)\n\t\tself.makeTx(force)", "def test_force(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n self._make_file(join(course_dir, \"source\", \"ps1\", \"foo.txt\"), \"foo\")\n self._make_file(join(course_dir, \"source\", \"ps1\", \"data\", \"bar.txt\"), \"bar\")\n run_nbgrader([\"generate_assignment\", \"ps1\", \"--db\", db])\n\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"submitted\", \"foo\", \"ps1\", \"p1.ipynb\"))\n self._make_file(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.txt\"), \"foo\")\n self._make_file(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"data\", \"bar.txt\"), \"bar\")\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n\n self._make_file(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"blah.pyc\"), \"asdf\")\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db])\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"data\", \"bar.txt\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"blah.pyc\"))\n\n # check that it skips the existing directory\n remove(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db])\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n\n # force overwrite the supplemental files\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--force\"])\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n\n # force overwrite\n remove(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"foo.txt\"))\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--force\"])\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"data\", \"bar.txt\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"blah.pyc\"))", "def test_version_control_invalid(self):\n versions = ('Thirteen', '-1', -1, 
'' , 13)\n # A fresh repository doesn't go up to version 13 yet\n for version in versions:\n #self.assertRaises(ControlledSchema.InvalidVersionError,\n # Can't have custom errors with assertRaises...\n try:\n ControlledSchema.create(self.engine, self.repos, version)\n self.assertTrue(False, repr(version))\n except exceptions.InvalidVersionError:\n pass", "def test_incorrect_case_file_index(data: TestData) -> None:\n req = install_req_from_line(\"dinner\")\n finder = make_test_finder(index_urls=[data.find_links3])\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"Dinner-2.0.tar.gz\")", "def test_sort_git_master_and_latest(self):\n identifiers = [\"latest\", \"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_GIT\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n [\"master\", \"latest\", \"2.0\", \"1.10\", \"1.9\", \"1.1\", \"1.0\"],\n [v.slug for v in sort_version_aware(versions)],\n )", "def try_cherry_pick_all(rev):\n num_par = len(parents(rev))\n any_success = False\n for i in range(1, num_par + 1):\n any_success = any_success or try_cherry_pick(rev, mainline=i)\n return any_success", "def ensure_indexes(self):\n self.db_connection.ensure_indexes()", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def test_unique_together(self):\n\n for mb_model in self.mb_model_list:\n indexes = connection.introspection.get_indexes(\n self.cursor, mb_model._meta.db_table)\n if not indexes and not is_db_view(mb_model._meta.db_table):\n self.assertTrue(mb_model._meta.unique_together)", "def isort_check(ctx):\n ctx.run(f\"{VENV_PREFIX} isort --atomic --check-only .\")", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def dirty_branches(self):\n # If no course index has been set, then no branches have changed\n if self.index is None:\n return []\n\n # If there was no index in the database to start with, then all branches\n # are dirty by definition\n if self.initial_index is None:\n return list(self.index.get('versions', {}).keys())\n\n # Return branches whose ids differ between self.index and self.initial_index\n return [\n branch\n for branch, _id\n in self.index.get('versions', {}).items()\n if self.initial_index.get('versions', {}).get(branch) != _id\n ]", "def protect_branch_with_approvals(branch_name: str, count: int = 1,\n require_code_owner_review: bool = False) -> repoproc_t:\n def _protect_branch_with_approvals(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n if branch_name in branches:\n return _protect_branch(branches[branch_name], count, require_code_owner_review)\n else:\n print_warning(\"Requested to protect branch %s on repo %s, but the branch does not exist.\" %\n (highlight(branch_name), highlight(repo.name)))\n return []\n return _protect_branch_with_approvals", "def precommit(exit=True):\n tmpdir = tempfile.mkdtemp()\n\n try:\n copy_index(tmpdir)\n\n modified = check_output(['git', 
'diff', '--cached', '--name-only',\n '--diff-filter=ACMRT'])\n modified = [name.strip() for name in modified.splitlines()]\n path = os.environ['PATH']\n with pushd(tmpdir) as prevdir:\n conf = load_conf()\n # Activate the virtualenv before running checks\n if 'env' in conf:\n binpath = os.path.abspath(os.path.join(prevdir,\n conf['env']['path'],\n 'bin'))\n if binpath not in path.split(os.pathsep):\n path = binpath + os.pathsep + path\n retcode = run_checks(conf.get('hooks_all', []),\n conf.get('hooks_modified', []), modified,\n path)\n\n if exit:\n sys.exit(retcode)\n else:\n return retcode\n finally:\n shutil.rmtree(tmpdir)", "def validate_index_options(options):\n if len(options.indexes) > 0:\n require(len(options.graphs) == 0 or options.chroms, '--chroms must be specified for --graphs')\n require(len(options.graphs) == 1 or len(options.chroms) == len(options.graphs),\n '--chroms and --graphs must have'\n ' same number of arguments if more than one graph specified if doing anything but xg indexing')\n require(any([len(options.indexes) > 0, \n options.bwa_index_fasta]),\n 'one of --xg_index, --gcsa_index, --snarls_index, --trivial_snarls_index, --id_ranges_index, '\n '--gbwt_index, --minimizer_index, --distance_index, --all_index, --alt_path_gam_index or '\n '--bwa_index_fasta is required')\n require(not options.gbwt_prune or options.node_mapping,\n '--node_mapping required with --gbwt_prune')\n require('gbwt' not in options.indexes or not options.gbwt_input,\n 'only one of --gbwt_index and --gbwt_input can be used at a time')\n if options.gbwt_input:\n require(options.gbwt_prune == 'gbwt', '--gbwt_prune required with --gbwt_input')\n validate_shared_index_options(options)", "def test_hash(self):\n ffs = get_available_force_fields()\n\n for ff1, ff2 in itertools.combinations(ffs, 2):\n assert hash(ff1) != hash(ff2)", "def forced_checkout_with_faux_obstructions(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos but has no true differences.\n expected_output = make_local_tree(sbox, False, False)\n\n expected_wc = svntest.main.greek_state.copy()\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')", "def branch(self, name=None, clean=None, force=None):\n\n if name and clean:\n raise ValueError('Cannot use both name and clean')\n\n self._client.execute('branch', name, f=force, C=clean)\n\n if name:\n return name\n elif not clean:\n return out.strip()\n else:\n return out[len('reset working directory to branch '):]", "def check_versions(ctx, show=False):\n sys.path.insert(0, os.path.join(ROOT_DIR, '_tools'))\n import versions\n versions.main()", "def update_repo_all(force=False):\n logger.info(\"attempting to update repo...\")\n for model in kimobjects.Model.all():\n for test in kimobjects.Test.all():\n #logger.info(\"attempting to update test %r\",test)\n if force or not kimobjects.TestResult.test_result_exists(test,model):\n logger.info(\"Running %r vs %r\",test,model)\n try:\n results = run_test_on_model(test,model)\n tr = kimobjects.TestResult(results=results)\n except:\n logger.error(\"WE HAD an error on (%r,%r) with:\\n%r\",test,model,sys.exc_info()[0])\n else:\n logger.info(\"%r vs %r seems current\",test,model)", "def test_dependency_order(self):\n sections = self.old_manifest.formula_sections()\n assert sections.index(\"git\") < sections.index(\n \"sub\"\n ), \"Dependency is out of order! 
git comes after sub\"", "def check(force, verbose, packages):\n code = 0\n packages = CFG.package_specs(packages)\n if not packages:\n print(\"No packages installed\")\n sys.exit(0)\n\n for pspec in packages:\n desired = pspec.get_desired_version_info(force=force)\n dv = runez.bold(desired.version)\n manifest = pspec.get_manifest()\n if desired.problem:\n msg = desired.problem\n code = 1\n\n elif not manifest or not manifest.version:\n msg = \"v%s is not installed\" % dv\n code = 1\n\n elif manifest.version == desired.version:\n msg = \"v%s is installed\" % dv\n\n else:\n action = \"upgraded to\" if desired.source == \"latest\" else \"caught up to %s\" % desired.source\n msg = \"v%s installed, can be %s v%s\" % (runez.dim(manifest.version), action, dv)\n\n print(\"%s: %s\" % (pspec.dashed, msg))\n\n sys.exit(code)", "def reindex(self):", "def reindex(self):", "def check_unstaged_changes(self):\n pass", "def check_consistency(self):\n raise NotImplementedError()", "def vulnerable_versions(self):\n raise NotImplementedError()", "def apply_patch():\n assert BaseDatabaseSchemaEditor is not None\n\n def _create_unique_sql(self, *args, **kwargs):\n from django.db.backends.ddl_references import IndexName\n\n statement = orig_create_unique_sql(self, *args, **kwargs)\n\n if statement is not None:\n index_name = statement.parts['name']\n\n if (isinstance(index_name, IndexName) and\n index_name.create_index_name == self._create_index_name):\n # The result will be unquoted. Let's quote it.\n index_name.create_index_name = lambda *args, **kwargs: \\\n self.quote_name(self._create_index_name(*args, **kwargs))\n\n return statement\n\n orig_create_unique_sql = BaseDatabaseSchemaEditor._create_unique_sql\n BaseDatabaseSchemaEditor._create_unique_sql = _create_unique_sql", "def checkGit(directory):", "async def build_secret_index(self):\n pass", "def check_integrity(self) -> None:\n for subset in self.subsets:\n if not self._check_subset_integrity(subset):\n raise ValueError(f\"subset {subset} not found or corrupt\")", "def verify_proof(ap, key_index, key, leaf_hash):\n if key_index == len(ap):\n return leaf_hash\n\n if bit_is_set(key, key_index):\n right = verify_proof(ap, key_index+1, key, leaf_hash)\n return hashlib.sha256(ap[len(ap)-key_index-1] + right).digest()\n left = verify_proof(ap, key_index+1, key, leaf_hash)\n return hashlib.sha256(left + ap[len(ap)-key_index-1]).digest()", "def test_finder_only_installs_data_require(data: TestData) -> None:\n\n # using a local index (that has pre & dev releases)\n finder = make_test_finder(index_urls=[data.index_url(\"datarequire\")])\n links = finder.find_all_candidates(\"fakepackage\")\n assert {str(v.version) for v in links} == {\"1.0.0\", \"3.3.0\", \"9.9.9\"}", "def test_version_control_specified(self):\n # Establish version control on this database\n version = 0\n dbcontrol = ControlledSchema.create(self.engine, self.repos, version)\n self.assertEqual(dbcontrol.version, version)\n\n # Correct when we load it, too\n dbcontrol = ControlledSchema(self.engine, self.repos)\n self.assertEqual(dbcontrol.version, version)\n\n dbcontrol.drop()\n\n # Now try it with a nonzero value\n version = 10\n for i in range(version):\n self.repos.create_script('')\n self.assertEqual(self.repos.latest, version)\n\n # Test with some mid-range value\n dbcontrol = ControlledSchema.create(self.engine,self.repos, 5)\n self.assertEqual(dbcontrol.version, 5)\n dbcontrol.drop()\n\n # Test with max value\n dbcontrol = ControlledSchema.create(self.engine, self.repos, version)\n 
self.assertEqual(dbcontrol.version, version)\n dbcontrol.drop()", "def __gitBisectGood(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"good\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def check_unique_import(\n loaded_db,\n name,\n version,\n nb_trials,\n nb_child_trials=0,\n nb_versions=1,\n algo_state=None,\n trial_links=None,\n ):\n _Helpers.check_db(\n loaded_db,\n nb_exps=1 * nb_versions,\n nb_algos=1 * nb_versions,\n nb_trials=nb_trials * nb_versions,\n nb_benchmarks=0,\n )\n for i in range(nb_versions):\n _Helpers.check_exp(\n loaded_db,\n name,\n version + i,\n nb_trials=nb_trials,\n nb_child_trials=nb_child_trials,\n algo_state=algo_state,\n trial_links=trial_links,\n )", "def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job 
'{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews", "def write_version(settings, version, force=False):\n semver_path = settings['semver_path']\n filename = settings['semver_branch']\n path = os.path.join(semver_path, filename)\n logger.debug(f'write version:{version} to path:{path} with force:{force}')\n\n path_exists = os.path.exists(path)\n if path_exists:\n current_version = read_version(settings)\n if current_version == version:\n logger.debug(f'version is same as current version {current_version}')\n return\n\n if not path_exists or force:\n write_file(path, version)\n semver_repo = Repo(semver_path)\n index = semver_repo.index\n index.add([filename])\n semver_user_name = settings['semver_user_name']\n semver_user_email = settings['semver_user_email']\n author = Actor(semver_user_name, semver_user_email)\n index.commit(f'semver({filename}): {version}', author=author, committer=author, parent_commits=None)", "def test_heads_contains_false(repository: Repository) -> None:\n assert \"branch\" not in repository.heads", "def commit(self):\n raise multisearch.errors.FeatureNotAvailableError", "def stable():\n env.branch = 'stable'", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def check_force_generate(self, force):\n if self.exists() and not force:\n self.log.warn(\n '{} already exists, skipping key generation...'.format(self.key_file)\n )\n return False\n else:\n return True", "def update_index(self, ref_gen):\n testing = True\n logging.warning('Updating index')\n es_insert.index(es, ref_gen, self.index_name, testing, action=\"update\")\n logging.warning('Finished updating')", "def _commit_to_index( env_dict ):\n from indexer.solr import adapter as adapter_file\n\n adapter = adapter_file.adapter(env_dict)\n adapter.commit()\n if env_dict[\"indexer\"][\"optimise\"]:\n adapter.optimise(maxSegments=1)", "def release(store, name, constraints, branches):\n session = Session(store)\n\n # Branches is a list of (name, n_buckets, settings) tuples\n all_buckets = [session.get(Bucket, x, default=CREATE) for x in range(NUM_BUCKETS)]\n\n edited_settings = set.union(*[set(x[2].keys()) for x in branches])\n\n conflicting_experiments = set()\n valid_bucket_indices = []\n\n for idx, bucket in enumerate(all_buckets):\n if is_valid_bucket(bucket, edited_settings, constraints):\n valid_bucket_indices.append(idx)\n else:\n for entry in bucket.entries:\n # Determine if this entry is a potential conflict\n if set(entry.settings.keys()).isdisjoint(edited_settings):\n continue\n conflicting_experiment_id, _ = entry.key\n conflicting_experiments.add(conflicting_experiment_id)\n\n random.shuffle(valid_bucket_indices)\n\n for branch_name, n_buckets, settings in branches:\n key = [name, branch_name]\n bucket_indices = valid_bucket_indices[:n_buckets]\n\n if len(bucket_indices) < n_buckets:\n raise NotEnoughBucketsException(conflicts=conflicting_experiments)\n\n valid_bucket_indices = valid_bucket_indices[n_buckets:]\n\n for bucket_idx in bucket_indices:\n bucket = all_buckets[bucket_idx]\n\n bucket.add(key, settings, constraints)\n\n session.flush()", "def check_commit_for_branch( self\n , commit\n , branch_id\n , any_locked_files\n , case_conflict_checker ):\n rev = commit['sha1']\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug(\"check_commit_for_branch() \"\n \"Checking branch={} mark={} sha1={} file-ct={} -- {}\"\n .format( branch_id\n , 
commit['mark']\n , p4gf_util.abbrev(rev)\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if self._already_copied_commit(rev, branch_id):\n return\n\n # following checks assume client has been set for branch\n self.ensure_branch_preflight(commit, branch_id)\n with self.ctx.switched_to_branch(\n self._current_branch\n , set_client=self.set_client_on_branch_switch\n ):\n if case_conflict_checker:\n case_conflict_checker.read_fast_export_commit(\n commit, self._current_branch)\n\n # Empty commits require root-level .p4gf_placeholder to be mapped\n # in the current branch view.\n if not commit['files'] and not self._is_placeholder_mapped():\n raise PreflightException(\n _(\"Empty commit {sha1} not permitted. Git Fusion branch views\"\n \" must include root to permit empty commits.\")\n .format(sha1=p4gf_util.abbrev(rev)))\n\n with Timer(CHECK_PROTECTS):\n self._check_protects(commit['author_p4user'], commit['files'])\n\n with Timer(CHECK_OVERLAP):\n self._check_overlap(commit)\n\n # fetch the branch setting only, without cascading to repo/global config\n if self._current_branch.is_read_only:\n raise PreflightException(_(\"Push to branch {branch} prohibited.\")\n .format(branch=self._current_branch.git_branch_name))\n self._check_stream_writable(commit)\n self._check_stream_in_classic(commit)\n\n LOG.debug('checking locked files under //{}/...'.format(self.ctx.p4.client))\n if any_locked_files:\n # Convert the git commit paths to depotPaths\n files_in_commit = [self.ctx.gwt_path(f['path']).to_depot()\n for f in commit['files']]\n LOG.debug(\"files_in_commit {0}\".format(files_in_commit))\n for f in files_in_commit:\n if f in any_locked_files:\n # Collect the names (and clients) of users with locked files.\n # Report back to the pusher so they can take appropriate action.\n msg = _('{file} - locked by {user}').format(file=f,\n user=any_locked_files[f])\n LOG.info(msg)\n raise PreflightException(msg)\n\n # +++ Spend time extracting Jobs and P4Changelist owner\n # here if we actually do need to call\n # the preflight-commit hook.\n if self.ctx.preflight_hook.is_callable():\n jobs = G2PJob.extract_jobs(commit['data'])\n jobs2 = G2PJob.lookup_jobs(self.ctx, jobs)\n self.ctx.preflight_hook(\n ctx = self.ctx\n , fe_commit = commit\n , branch_id = branch_id\n , jobs = jobs2\n )", "def test_cherrypickhead_progress(repository: Repository, path: Path) -> None:\n createconflict(repository, path, ours=\"a\", theirs=\"b\")\n\n assert repository.cherrypickhead == repository.heads[\"update\"]", "def on_same_branch(left, right):\n left = LooseVersion(left)\n right = LooseVersion(right)\n return left.version[:2] == right.version[:2]", "def test_works_index_name(self):\n assert \"test_index-v4\" == self.search.works_index_name(self._db)", "def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')", "def test_force_f(self, db, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\", \"--db\", db])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\", \"--db\", db])\n\n self._copy_file(join(\"files\", \"submitted-unchanged.ipynb\"), join(course_dir, \"source\", \"ps1\", \"p1.ipynb\"))\n self._make_file(join(course_dir, \"source\", \"ps1\", \"foo.txt\"), \"foo\")\n self._make_file(join(course_dir, \"source\", \"ps1\", \"data\", \"bar.txt\"), \"bar\")\n run_nbgrader([\"generate_assignment\", \"ps1\", \"--db\", db])\n\n self._copy_file(join(\"files\", 
\"submitted-unchanged.ipynb\"), join(course_dir, \"submitted\", \"foo\", \"ps1\", \"p1.ipynb\"))\n self._make_file(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.txt\"), \"foo\")\n self._make_file(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"data\", \"bar.txt\"), \"bar\")\n run_nbgrader([\"autograde\", \"ps1\", \"--db\", db])\n\n self._make_file(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"blah.pyc\"), \"asdf\")\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db])\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"data\", \"bar.txt\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"blah.pyc\"))\n\n # check that it skips the existing directory\n remove(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db])\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n\n # force overwrite the supplemental files\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"-f\"])\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n\n # force overwrite\n remove(join(course_dir, \"autograded\", \"foo\", \"ps1\", \"foo.txt\"))\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--db\", db, \"--force\"])\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"p1.html\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.txt\"))\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"data\", \"bar.txt\"))\n assert not isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"blah.pyc\"))", "def index(args):\n for level in ['family', 'language', 'dialect']:\n if args.args[0] in [level, 'all']:\n make_index(level)", "def test_finder_only_installs_stable_releases(data: TestData) -> None:\n\n req = install_req_from_line(\"bar\")\n\n # using a local index (that has pre & dev releases)\n finder = make_test_finder(index_urls=[data.index_url(\"pre\")])\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"bar-1.0.tar.gz\"), found.link.url\n\n # using find-links\n links = [\"https://foo/bar-1.0.tar.gz\", \"https://foo/bar-2.0b1.tar.gz\"]\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"\n\n links.reverse()\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"", "def updateDepTree(self,checked):\n\n if( self.name in checked ):\n return\n else:\n checked.append(self.name)\n\n for mod in self.parent.modules:\n if( mod.name != self.name ):\n mods = mod.reqmodules + mod.optmodules + mod.reqmodules_buildonly\n if( self.name in mods ):\n if( mod.mode != \"install\" or not mod.rebuild ):\n #if( mod.mode != \"install\" and not mod.rebuild ):\n if( mod.useLink ):\n print \"***\\t * WARNING: \" + mod.name + \" is in \\\"link\\\" mode, \" \\\n + \"if you want to rebuild it with the new dependencies set it to \\\"use\\\" mode...!!\"\n else:\n if( not self.parent.noAutomaticRebuilds ):\n if( mod.mode != \"install\" ):\n print \"***\\t * \" + mod.name + \" changed to \\\"install\\\" mode and rebuild Flag set to true!!\"\n mod.mode = \"install\"\n 
mod.rebuild = True\n mod.preCheckDeps()\n mod.updateDepTree(checked)" ]
[ "0.5562095", "0.5386104", "0.5354058", "0.53346205", "0.5217734", "0.5204411", "0.51927465", "0.5189931", "0.5149217", "0.5106516", "0.5092081", "0.5082172", "0.50748366", "0.5031404", "0.49861875", "0.4984854", "0.4967", "0.49571994", "0.49304572", "0.49161586", "0.48917702", "0.4843911", "0.48331442", "0.48251724", "0.48174083", "0.48174083", "0.48174083", "0.48121828", "0.48057586", "0.4803555", "0.47952485", "0.47927728", "0.47880954", "0.47815922", "0.47795552", "0.47749045", "0.47718036", "0.47441894", "0.47377223", "0.4721839", "0.4714696", "0.47076532", "0.469938", "0.46976325", "0.46933818", "0.4691282", "0.46815306", "0.46736583", "0.4673401", "0.4668706", "0.46625075", "0.4659269", "0.4657411", "0.46567935", "0.46557826", "0.4650685", "0.46492594", "0.46462268", "0.46381512", "0.4637841", "0.46177134", "0.4611174", "0.4607962", "0.4592112", "0.45874578", "0.45814526", "0.45694822", "0.45686644", "0.45551607", "0.45551607", "0.45408136", "0.45388326", "0.45388186", "0.45387086", "0.4538085", "0.45365527", "0.45355698", "0.45346743", "0.45297238", "0.45259005", "0.45250863", "0.45213246", "0.4518177", "0.45177832", "0.4514564", "0.45132154", "0.45132136", "0.45128414", "0.45044494", "0.4491442", "0.44868937", "0.44842285", "0.44833708", "0.4482836", "0.44817635", "0.44758776", "0.44722173", "0.4470674", "0.44668108", "0.44659254", "0.44619238" ]
0.0
-1
Lists all the blobs in the bucket.
def gcs_list_blobs(bucket_name):
    storage_client = client  #storage.client()
    bucket = storage_client.get_bucket(bucket_name)
    blobs = bucket.list_blobs()

    blob_list = []
    for blob in blobs:
        blob_list.append(blob.name)
    return blob_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_blobs(bucket_name):\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n return blobs", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n print(storage_client.current_batch)\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n # print(len([1 for blob in blobs]))\n for blob in blobs:\n print(blob.name)", "def list_blobs(bucket):\n bucket = default_bucket if bucket is None else bucket\n bucket_name = bucket if isinstance(bucket, str) else bucket.name\n blobs = gcs.list_blobs(bucket_name)\n return blobs", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n return [blob.name for blob in blobs]", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n list_blobs = [blob.public_url for blob in blobs]\n return list_blobs", "def list_blobs(self, prefix=''):\n return [b.name for b in self.bucket.list_blobs(prefix=prefix)]", "def get_list_of_blobs(bucket_name, prefix=None, delimiter=None):\r\n\r\n # initialize client\r\n storage_client = storage.Client()\r\n\r\n # get list blobs\r\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)\r\n\r\n for blob in blobs:\r\n print(blob.name)\r\n\r\n if delimiter:\r\n print(\"Prefixes:\")\r\n for prefix in blobs.prefixes:\r\n print(prefix)\r\n\r\n return None", "def list_bucket(project: str, bucket: str, prefix: str = None) -> typing.List[typing.Any]:\n client = google.cloud.storage.Client(project)\n return list(client.list_blobs(bucket_or_name=bucket, prefix=prefix))", "def list_bucket(self, bucket):\n\n self.response.write(\"Listbucket result:\\n\")\n\n # Production apps should set page_size to a practical value.\n page_size = 1\n stats = cloudstorage.listbucket(bucket + \"/foo\", max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write(\"\\n\")\n\n if count != page_size or count == 0:\n break\n stats = cloudstorage.listbucket(\n bucket + \"/foo\", max_keys=page_size, marker=stat.filename\n )", "def list_bucket(self, bucket):\n self.response.write('Listbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,\n marker=stat.filename)", "def list_blobs():\n # bucket_name = \"your-bucket-name\"\n storage_client = storage.Client()\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n numFiles = 0\n for blob in blobs:\n numFiles += 1\n return numFiles", "def list_buckets():\n for bucket in s3.buckets.all():\n print(bucket)", "def list_buckets():\n for bucket in BUCKET_MANAGER.all_buckets():\n print(bucket)", "async def fetch_file_list(client, bucket) -> List:\n # pylint: disable=invalid-name\n PG_HOSTNAME = config('PG_HOSTNAME')\n PG_DATABASE = config('PG_DATABASE')\n folder = 
f'backup/{PG_HOSTNAME}_{PG_DATABASE}'\n result = await client.list_objects_v2(Bucket=bucket, Prefix=folder)\n contents = result.get('Contents', None)\n file_list = list([])\n if contents:\n for content in contents:\n file_list.append(content.get('Key'))\n return file_list", "def blobs(self):\n def blob_iterator():\n with s3conn(self.access_id, self.secret) as s3:\n key_iter = s3.list_bucket(self.bucket, prefix=self.prefix+\"/\")\n for key in key_iter:\n blob = key[len(self.prefix)+1:]\n yield blob\n return blob_iterator", "def list_bucket(self, bucket_id=None):\n url = self.prism_endpoint + \"/wBuckets\"\n\n if bucket_id is not None:\n url = url + \"/\" + bucket_id\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your buckets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def get_all_files(self, **keyword_args):\n blobs = self.storage_client.list_blobs(\n self.bucket.name, **keyword_args)\n return blobs", "def list_bucket(self, bucket):\n self.response.write('Creating more files for listbucket...\\n')\n self.create_file(bucket + '/foo1')\n self.create_file(bucket + '/foo2')\n self.response.write('\\nListbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket, max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n last_filename = stat.filename[len(bucket) + 1:]\n stats = gcs.listbucket(bucket, max_keys=page_size, marker=last_filename)", "def ls(region_name=DEFAULT_REGION):\n s3conn = s3.connect_to_region(region_name)\n buckets = s3conn.get_all_buckets()\n for bucket in buckets:\n print(bucket.name)", "def list_objects(bucket=None):\n hook = GoogleCloudStorageHook()\n storage_objects = hook.list(bucket)\n\n return storage_objects", "def list_buckets():\n response = s3.list_buckets()\n # Output the bucket names\n print('Existing buckets:')\n for bucket in response['Buckets']:\n print(bucket[\"Name\"])", "def list_bucket_objects(bucket):\n for obj in BUCKET_MANAGER.all_objects(bucket).all():\n print(obj)", "def list(self, glob_pattern=\"\"):\r\n # strip relative path so we don't step outside our emulated storage area\r\n glob_pattern = force_local_path(glob_pattern)\r\n\r\n # analyze glob_pattern to determine how to return blob names\r\n\r\n # if glob_pattern is a folder\r\n if not glob_pattern:\r\n # default to all blobs at the root level\r\n glob_pattern = \"*\"\r\n elif is_folder(f\"{self._blob_folder()}/{glob_pattern}\"):\r\n # if glob_pattern is a folder, return all blobs within folder\r\n glob_pattern = f\"{force_trailing_slash(glob_pattern)}*\"\r\n else:\r\n # use glob_pattern as-is\r\n pass\r\n\r\n # retrieve sorted blob names\r\n target_path = f\"{self._blob_folder()}/{glob_pattern}\"\r\n\r\n # build list of blob names with local parent path stripped from names\r\n blob_names = list()\r\n for blob_name in sorted(glob.glob(target_path)):\r\n # format name using Linux path delimiters\r\n blob_name = blob_name.replace(chr(92), \"/\")\r\n blob_name = blob_name.replace(f\"{self._blob_folder()}/\", \"\")\r\n blob_names.append(blob_name)\r\n\r\n blob_count = len(blob_names)\r\n logger.debug(\r\n self._context(f\"list({glob_pattern}) returned {blob_count} blob names\")\r\n )\r\n logger.debug(self._context(f\"list({glob_pattern}) = {blob_names}\"))\r\n 
return blob_names", "def get_list_of_buckets():\r\n\r\n # initialize client\r\n storage_client = storage.Client()\r\n\r\n # get list of buckets\r\n buckets = storage_client.list_buckets()\r\n\r\n list_of_buckets = []\r\n for bucket in buckets:\r\n list_of_buckets.append(bucket.name)\r\n\r\n return list_of_buckets", "def blobs(self):\n if not self._blobs:\n workspace = self.attributes.workspace\n # Instantiates a google client, & get all blobs in bucket\n storage_client = storage.Client(project=self._user_project)\n bucket = storage_client.bucket(workspace['bucketName'], user_project=self._user_project)\n # get subset of data\n _blobs = {}\n try:\n for b in bucket.list_blobs(fields='items(size, etag, crc32c, name, timeCreated),nextPageToken'):\n name = f\"gs://{workspace['bucketName']}/{b.name}\"\n # cache.put(name, {'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n _blobs[name] = AttrDict({'size': b.size, 'etag': b.etag, 'crc32c': b.crc32c, 'time_created': b.time_created, 'name': name})\n self._blobs = _blobs\n except Exception as e:\n print(f\"{self.id} {workspace['bucketName']} {e}\")\n self._blobs = _blobs\n return self._blobs", "def list_buckets(self):\n return [x[\"Name\"] for x in self.s3_client.list_buckets()[\"Buckets\"]]", "def listBucket(self, clientCall):\n\t\ttry:\n\t\t\tresponse \t= clientCall.list_objects(Bucket=self.bucket)\n\t\t\tresponse \t= self.parseJson(\"Contents[].Key\", response)\n\n\t\t\tfor objects in response:\n\t\t\t\twrite(var=f\"{g}#\", color=w, data=f\"{objects}\")\n\t\t\t\tsleep(0.01)\n\n\t\t\treturn(response)\n\n\t\texcept botocore.exceptions.ClientError as e:\n\t\t\tif \"AccessDenied\" and \"ListObjects\" in e.args[0]:\n\t\t\t\treturn(None)", "def listAllBuckets(self):\n print self.getAllBuckets()", "def list_objects(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def list_files(bucket):\r\n s3 = boto3.client('s3')\r\n contents = []\r\n for item in s3.list_objects(Bucket=bucket)['Contents']:\r\n contents.append(item)\r\n\r\n return contents", "def list_objects(self, bucket_name, prefix='', offset=0, count=10000):\n\n objects, done = h3lib.list_objects(self._handle, bucket_name, prefix, offset, count, self._user_id)\n return H3List(objects, done=done)", "def get_list_of_files(bucket_name, path):\n client = get_client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(filename)\n return blob", "async def la() -> Tuple[str]:\n li = []\n async with _create_client() as client:\n for bucket in (await client.list_buckets())['Buckets']:\n _ = await client.list_objects(Bucket=bucket['Name'])\n for item in _['Contents']:\n if item['Key'].endswith('/') is False:\n li.append(bucket['Name'] + '/' + item['Key'])\n logger.info('List all objects in all buckets.')\n return tuple(li)", "def get_bucketlist():\n pass", "def test_list(self):\n responses.add(\n responses.Response(\n method='GET',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=list_response\n )\n )\n buckets_list = self.buckets.list()\n assert isinstance(buckets_list, list)", "def list_buckets():\n buckets = gcs.list_buckets()\n names = []\n for bucket in buckets:\n names.append(bucket.name)\n return names", "def list_buckets(self):\n response = self._client.list_buckets()\n return [item['name'] for item in response['Buckets']]", "def list(self, prefix='', delimiter='', 
marker='', headers=None):\r\n return BucketListResultSet(self, prefix, delimiter, marker, headers)", "def blob_stats(self):\n def blob_iterator():\n with s3conn(self.access_id, self.secret) as s3:\n key_iter = s3.list_bucket2(self.bucket, prefix=self.prefix+\"/\")\n for head in key_iter:\n blob = head[LIST_BUCKET_KEY][len(self.prefix)+1:]\n head['blob'] = blob\n yield head\n return blob_iterator", "def list_files_in_s3_bucket(bucket_name):\n s3 = boto3.client(\"s3\")\n bucket = s3.list_objects(Bucket=bucket_name)\n contents = [c[\"Key\"] for c in bucket[\"Contents\"]]\n return contents", "def list_s3_content(bucket, access_id, access_secret, prefix=\"\"):\n try:\n response = s3_client(access_id, access_secret).list_objects(Bucket=bucket, Prefix=prefix)\n objects = []\n if response.get(\"Contents\"):\n for key in response['Contents']:\n objects.append(key['Key'])\n return objects\n except ClientError as error:\n LOGGER.error(error)\n return None", "def get_objects(self):\r\n bucket = self._get_bucket()\r\n objs = []\r\n for key in bucket:\r\n objs.append(key)\r\n return objs", "def iterate_bucket_objects(self, bucket):\n client = self.credentials.session.client('s3')\n page_iterator = client.list_objects_v2(Bucket=bucket)\n if 'Contents' not in page_iterator:\n return []\n for item in page_iterator['Contents']:\n yield item", "def get_files_in_bucket(self, bucket_name):\n s3 = self.credentials.session.resource('s3')\n this_bucket = s3.Bucket(bucket_name)\n list_of_files = [s3file.key for s3file in this_bucket.objects.all()];\n return list_of_files", "def list_files_gcp(client: storage.client.Client, bucket_name: str) -> List[str]:\n blobs = client.list_blobs(bucket_name)\n files = [blob.name.split('.')[0] for blob in blobs]\n return files", "def list_all_buckets(riak_host,riak_port):\n url='http://%s:%s/buckets?buckets=true' % (riak_host,riak_port)\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def list_public_s3_objects(self):\n _response = urllib.urlopen(self.options.s3_bucket_url)\n _html = _response.read()\n\n if LOCAL_DEBUG:\n print _html\n\n try:\n assert \"AccessDenied\" not in _html\n assert \"NoSuchBucket\" not in _html\n except:\n print(\"ERROR: AccessDenied or NoSuchBucket for {}\".format(self.options.s3_bucket_url))\n raise", "def list_objects(self, bucket, prefix=None, include_metadata=False) -> list:\n objects = []\n kwargs = {'Bucket': bucket}\n if prefix:\n kwargs['Prefix'] = prefix\n\n is_truncated = True\n while is_truncated:\n response = self.client.list_objects_v2(**kwargs)\n if not response.get('Contents'):\n return objects\n\n for item in response['Contents']:\n if include_metadata:\n objects.append(item)\n else:\n objects.append(item['Key'])\n\n is_truncated = response['IsTruncated']\n kwargs['ContinuationToken'] = response.get('NextContinuationToken')\n\n return objects", "def read_bucket_objects(bucket_name):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.all():\n response = obj.get()\n body = response['Body'].read().decode('utf-8')\n print(body)", "def ListFiles(s3):\n response = s3.list_objects(Bucket='mynewbucket123')\n for content in response.get('Contents', []):\n yield content.get('Key')", "def list_buckets(self):\n msg = \"list_buckets not implemented\"\n raise NotImplementedError(msg)", "def test_list_bucket_content(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n 
conn.create_bucket(Bucket='foobucket')\n # Upload fake file to bucket\n s3 = boto3.client('s3')\n with open('test/test_resources/test_file', 'rb') as data:\n s3.upload_fileobj(data, 'foobucket', 'foofile')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n self.assertEqual(s3_connector.list_bucket_content(\n \"foobucket\"), [\"foofile\"])", "async def ls(path: str = None) -> Tuple[str]:\n async with _create_client() as client:\n if path is None:\n resp = await client.list_buckets()\n logger.info('List all buckets.')\n return tuple([item['Name'] for item in resp['Buckets']])\n else:\n _ = path.strip('/').split('/')\n kws = {'Bucket': _[0]}\n if len(_) > 1:\n kws.update({'Prefix': '/'.join(_[1:])})\n try:\n resp = await client.list_objects(**kws)\n logger.info(f'List objects in \"{path}\".')\n return tuple([\n f'/{_[0]}/' + item['Key'] for item in resp['Contents']\n ]) if 'Contents' in resp.keys() else ()\n except ClientError:\n return ()", "def getBlobs( self ):\n return self.__blobs;", "def ListCommand(self, args, sub_opts=None, headers=None, debug=0):\n listing_style = ListingStyle.SHORT\n get_bucket_info = False\n if sub_opts:\n for o, unused_a in sub_opts:\n if o == '-b':\n get_bucket_info = True\n if o == '-l':\n listing_style = ListingStyle.LONG\n if o == '-L':\n listing_style = ListingStyle.LONG_LONG\n if not args:\n # default to listing all gs buckets\n args = ['gs://']\n\n total_objs = 0\n total_bytes = 0\n for uri_str in args:\n uri = self.StorageUri(uri_str, debug=debug, validate=False)\n\n if not uri.bucket_name:\n # Provider URI: add bucket wildcard to list buckets.\n for uri in self.CmdWildcardIterator('%s://*' % uri.scheme,\n headers=headers, debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri, listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n\n elif not uri.object_name:\n if get_bucket_info:\n # ls -b request on provider+bucket URI: List info about bucket(s).\n for uri in self.CmdWildcardIterator(uri, headers=headers,\n debug=debug):\n (bucket_objs, bucket_bytes) = self.PrintBucketInfo(uri,\n listing_style,\n headers=headers,\n debug=debug)\n total_bytes += bucket_bytes\n total_objs += bucket_objs\n else:\n # ls request on provider+bucket URI: List objects in the bucket(s).\n for obj in self.CmdWildcardIterator(uri.clone_replace_name('*'),\n ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 1\n\n else:\n # Provider+bucket+object URI -> list the object(s).\n for obj in self.CmdWildcardIterator(uri, ResultType.KEYS,\n headers=headers, debug=debug):\n total_bytes += self.PrintObjectInfo(uri, obj, listing_style,\n headers=headers, debug=debug)\n total_objs += 1\n if listing_style != ListingStyle.SHORT:\n print ('TOTAL: %d objects, %d bytes (%s)' %\n (total_objs, total_bytes, MakeHumanReadable(float(total_bytes))))", "def find_google_cloud_storage_file_names(bucket, prefix=''):\n return list(bucket.list_blobs(prefix=prefix))", "def list_keys(self, bucket_name, prefix=None):\n url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '?action=keys'\n res = self.infinispan_client.get(url, auth=self.basicAuth)\n data = res.content\n return data", "def list_objects(Bucket=None, Delimiter=None, EncodingType=None, Marker=None, MaxKeys=None, Prefix=None, RequestPayer=None):\n pass", "def list_bucket_files(project, bucket_name, referenced_files, verbose):\n if verbose:\n 
print(\"listing contents of bucket gs://\" + bucket_name)\n\n # set up storage client\n storage_client = storage.Client(project=project)\n\n # check if bucket exists\n try:\n bucket = storage_client.get_bucket(bucket_name)\n except:\n print(f'Bucket {bucket_name} does not exist!')\n exit(1)\n\n # Note: Client.list_bucket_files requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n if verbose:\n print(\"finished listing bucket contents. processing files now in chunks of 1000.\")\n\n bucket_dict = dict()\n\n def extract_file_metadata(blob):\n blob_name = blob.name\n\n if blob_name.endswith('/'): # if this is a directory\n return None\n\n full_file_path = \"gs://\" + bucket_name + \"/\" + blob_name\n # support new submissions directory structure in Terra bucket\n submissions_dir = \"submissions\"\n if full_file_path.split('/', 4)[3] == submissions_dir:\n # new format is gs://bucket_id/submissions/submission_id/remaining_path\n submission_id = full_file_path.split('/', 5)[4]\n else:\n # old format is gs://bucket_id/submission_id/remaining_path\n # Splits the bucket file: \"gs://bucket_Id/submission_id/file_path\", by the '/' symbol\n # and stores values in a 5 length array: ['gs:', '' , 'bucket_Id', submission_id, file_path]\n # to extract the submission id from the 4th element (index 3) of the array\n submission_id = full_file_path.split('/', 4)[3]\n\n file_metadata = {\n \"file_name\": blob_name.split('/')[-1],\n \"file_path\": full_file_path,\n \"submission_id\": submission_id,\n \"size\": blob.size,\n \"is_in_data_table\": full_file_path in referenced_files,\n \"time_created\": blob.time_created\n }\n\n return file_metadata\n\n n_blobs = 0\n for page in blobs.pages: # iterating through pages is way faster than not\n if verbose:\n n_blobs += page.remaining\n print(f'...processing {n_blobs} blobs', end='\\r')\n for blob in page:\n file_metadata = extract_file_metadata(blob)\n if file_metadata:\n full_file_path = file_metadata['file_path']\n bucket_dict[full_file_path] = file_metadata\n\n if verbose:\n print(f'Found {len(bucket_dict)} files in bucket {bucket_name}')\n\n return bucket_dict", "def list_s3(bucket, prefix, ext):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' 
+ ext.lstrip('.')\n else:\n ext = ''\n\n counter = 0\n for item in s3_bucket.objects.filter(Prefix=prefix):\n counter += 1\n if counter % 5000 == 0:\n print(f'Found {counter} items so far', file=sys.stderr)\n\n key = item.key\n if not key.endswith(ext):\n continue\n\n # Write to stdout\n print(key)", "def list_objects(self, s3_prefix_path):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n return [\"s3://\" + bucket_name + \"/\" + key.key for key in bucket.objects.filter(Prefix=prefix)]", "def ListFiles(bucketname, client, key):\n response = client.list_objects(Bucket=bucketname, Prefix=key)\n for content in response.get('Contents', []):\n yield content.get('Key')", "def list_files(self, container_name, dir_path):\n blobs = RetryHandler.retry(lambda: self.blob_client.list_blobs(container_name, prefix=dir_path))\n for b in blobs:\n yield b.name", "def list_buckets():\n pass", "def buckets(self):\n if self._s3client is None:\n raise BucketStorageUnavailableException()\n\n buckets = [Bucket(self, x.name, create=False) for x in self._s3resource.buckets.all()]\n return buckets", "def test_list_blobs(*args, **kwargs):\n bucket_or_name = args[0]\n prefix = kwargs['prefix']\n candidate_path = f'{bucket_or_name}/{prefix}'\n config_paths = []\n\n for c in config_hierarchy:\n if c.startswith(candidate_path):\n fn = '/'.join(c.split('/')[1:])\n b = Blob(bucket='dummy', name=fn)\n config_paths.append(b)\n\n return iter(config_paths)", "def List(self, prefix=''):\n\n bucket, bucket_path = self._ParseBucketAndPath(prefix)\n names = []\n request = self._service.objects().list(bucket=bucket, prefix=bucket_path)\n response = self._RunWithRetries(request.execute, self._CommonErrorMatcher)\n\n while response:\n if 'items' in response:\n names += [item['name'] for item in response['items']]\n\n if 'nextPageToken' in response:\n request = self._service.objects().list(\n bucket=bucket, prefix=bucket_path,\n pageToken=response['nextPageToken'])\n response = self._RunWithRetries(request.execute,\n self._CommonErrorMatcher)\n else:\n response = None\n\n return names", "def get_objects(self, bucket, s3_client=None):\n\n try:\n all_objects = s3_client.list_objects_v2(Bucket=bucket)\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. 
If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n sys.exit(1)\n\n if 'Contents' in all_objects.keys():\n all_objects = [ k['Key'] for k in [ obj for obj in all_objects['Contents'] ]] # pylint: disable=unnecessary-comprehension\n else:\n all_objects = []\n\n return all_objects", "def ls(self):\n files = self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])", "def list_all_objects_s3(bucket, prefix, profile):\n s3 = boto3.Session(profile_name=profile).client('s3')\n keys = []\n continuation_token = \"\"\n\n while True:\n list_kwargs = dict(Bucket=bucket, Prefix=prefix)\n if continuation_token:\n list_kwargs['ContinuationToken'] = continuation_token\n resp = s3.list_objects_v2(**list_kwargs)\n keys += [x['Key'] for x in resp.get('Contents', [])]\n \n if not resp.get('IsTruncated'):\n break\n continuation_token = resp.get('NextContinuationToken')\n \n return keys", "def list(self, prefix=\"\"):\n try:\n list_rep = self.client.listdir(self.bucket + \"/\" + prefix)\n for i in list_rep:\n # Remove preceding bucket name and potential leading slash from returned key value\n i = i.replace(self.bucket, \"\").replace('tar', 'wsp.sz')\n if i[0] == '/': i = i[1:]\n yield i\n except pyhdfs.HdfsFileNotFoundException:\n pass", "def get_all(user_id):\n return Bucketlist.query.filter_by(created_by=user_id)", "def list_hnd(self, request, **kwargs):\n prefix = request.POST.get(\"prefix\", \"\")\n marker = request.POST.get(\"marker\", \"\")\n delimiter = request.POST.get(\"delimiter\", \"\")\n\n max_keys = int(request.POST.get(\"max_keys\", 1000))\n max_keys = max((1, max_keys)) # >= 1\n max_keys = min((1000, max_keys)) # <= 1000\n\n bucket = store.get_bucket()\n\n # prefix \"prefix\" with user dir\n eff_prefix = store.prefix_with_user_dir(request.user, prefix)\n\n # get list iterator from s3\n file_iter = bucket.list(prefix=eff_prefix, delimiter=delimiter,\n marker=marker, headers=None,\n encoding_type=None)\n\n # convert to list, try to get +1 item to be able\n # to determine if the results are truncated\n files = [key.key.split(\"/\", 1)[1]\n for key in itertools.islice(file_iter, 0, max_keys+1)]\n\n # if max_keys is less then there are more results\n # -> truncated = True\n truncated = len(files) > max_keys\n if truncated:\n # return 1 item less\n files = files[:-1]\n\n return self.create_response(request, {\n \"files\": files,\n \"truncated\": truncated\n })", "def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)", "def list_objects_v2(Bucket=None, Delimiter=None, EncodingType=None, MaxKeys=None, Prefix=None, ContinuationToken=None, FetchOwner=None, StartAfter=None, RequestPayer=None):\n pass", "def list_blobs(rsf_file, output_format=None, stream=None):\n\n cmds = rsf.read(rsf_file)\n register = Register(cmds)\n\n utils.check_readiness(register)\n blobs = register.log.blobs\n\n if output_format == \"csv\":\n schema = register.schema()\n headers = [attr.uid for attr in schema.attributes]\n xsv.serialise(stream, blobs, headers)\n\n stream.seek(0)\n\n return None\n\n if output_format == \"json\":\n utils.serialise_json({repr(k): v for k, v in blobs.items()}, stream)\n\n stream.seek(0)\n\n return None\n\n return blobs.values()", "def list_buckets(schedule_id):\n\n from mist.api.poller.models import ListBucketsPollingSchedule\n sched = ListBucketsPollingSchedule.objects.get(id=schedule_id)\n try:\n 
sched.cloud.ctl.objectstorage.list_buckets(persist=False)\n except (PeriodicTaskLockTakenError, PeriodicTaskTooRecentLastRun) as exc:\n list_buckets.logger.warning(\n '%s failed with %r',\n sched.name, exc)\n except Exception as exc:\n list_buckets.logger.error(\n '%s failed with %r',\n sched.name, exc)", "def list_objects(self, prefix, limit=None, give_size=False, suffix=''):\n objects = []\n total_size = 0\n if not limit or limit > 1000:\n paginator = self.client.get_paginator(\"list_objects\")\n page_iterator = paginator.paginate(Bucket=self.bucket, EncodingType='url', Prefix=prefix)\n for page in page_iterator:\n if \"Contents\" in page:\n for key in page[\"Contents\"]:\n if suffix != '':\n if key[\"Key\"].endswith(suffix):\n objects.append(urllib.parse.unquote(key[\"Key\"]))\n total_size += key['Size']\n else:\n objects.append(key[\"Key\"])\n total_size += key['Size']\n else:\n response = self.client.list_objects_v2(Bucket=self.bucket, Prefix=prefix)\n if \"Contents\" in response:\n for key in response['Contents']:\n if suffix != '':\n if key[\"Key\"].endswith(suffix):\n objects.append(key[\"Key\"])\n total_size += key['Size']\n else:\n objects.append(key[\"Key\"])\n total_size += key['Size']\n if give_size:\n return list(map(lambda key: key.replace('%3D', '='), objects)), total_size\n else:\n return list(map(lambda key: key.replace('%3D', '='), objects))", "def get(self):\n\n upload_info = self.request.GET.get('upload_info')\n if upload_info:\n self.response.headers['content-type'] = 'text/plain'\n self.response.out.write(upload_info)\n return\n\n blobs = reversed(blobstore.BlobInfo.all().fetch(10))\n output = template.render('index.html', {'blobs': blobs})\n self.response.out.write(output)", "def get_buckets_for_user(self):\n s3 = self.credentials.session.resource('s3')\n bucket_list = [bucket.name for bucket in s3.buckets.all()]\n\n return bucket_list;", "def s3_list_files(prefix, as_generator=False):\n return _do_list_files(S3_BUCKET, prefix, as_generator=as_generator)", "def get_list_of_keys(self, bucket_name=None, callback_fn=None):\n list_of_keys = []\n if not callback_fn:\n callback_fn = lambda x: print(x.key)\n if bucket_name:\n buckets = [self.s3_.get_bucket(bucket_name)]\n else:\n buckets = [b for b in self.s3_.get_all_buckets() if b.name not in self.BLACKLIST]\n for bucket in buckets:\n for key in bucket.list():\n callback_fn(key)\n list_of_keys.append(key)\n return list_of_keys", "def manipulate_bucketlist():\n pass", "def list_buckets(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n print(group[\"name\"] + \":\")\n for bucket in group[\"buckets\"]:\n print(\"\\t\" + bucket[\"name\"])", "async def glob(self):\n # get the path string up to the wildcards\n try:\n pi1 = self._path.index(\"*\")\n except ValueError:\n pi1 = len(self._path)\n try:\n pi2 = self._path.index(\"?\")\n except ValueError:\n pi2 = len(self._path)\n pi = min(pi1, pi2)\n # using the prefix will cut down on the search space\n prefix = self._path[:pi]\n # get the wildcard\n wildcard = self._path[pi:]\n # set up the paginator\n paginator = self._conn_obj.conn.get_paginator(\"list_objects_v2\")\n parameters = {\n 'Bucket': self._bucket,\n 'Prefix': prefix\n }\n page_iterator = paginator.paginate(**parameters)\n files = []\n async for page in 
page_iterator:\n for item in page.get('Contents', []):\n fname = item['Key']\n # check that it matches against wildcard\n if fnmatch(fname, wildcard):\n files.append(item['Key'])\n return files", "def describe_objects(bucket_name):\n # Upload the file\n try:\n # Retrieve list of files in bucket\n response = s3.list_objects_v2(Bucket=bucket_name)\n files = response.get(\"Contents\")\n # Output file names\n for file in files:\n print(f\"file_name: {file['Key']}, size: {file['Size']}\")\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def aws_s3_ls(s3_uri: str, list_extended=False)->list:\n client = boto3.client(\"s3\")\n bucket, prefix = _extract_bucket_key(s3_uri)\n s3_objects = []\n cont_token = None\n while (True):\n if cont_token is None:\n kwargs = {\n \"Bucket\": bucket,\n \"MaxKeys\": 100,\n \"Prefix\": prefix\n }\n else:\n kwargs = {\n \"Bucket\": bucket,\n \"MaxKeys\": 100,\n \"Prefix\": prefix,\n \"ContinuationToken\": cont_token\n } \n try:\n response = client.list_objects_v2(**kwargs)\n if response[\"KeyCount\"] == 0:\n print (\"Requested s3 object doesn't exist.\")\n break\n for record in response[\"Contents\"]:\n if record[\"Size\"] > 0: # ignore just prefix names\n if list_extended:\n s3_objects.append((record[\"Size\"], \n record[\"LastModified\"].strftime(\"%Y%m%d %H:%M:%S.%s\"), \n record[\"Key\"]))\n else:\n s3_objects.append(record[\"Key\"])\n if response[\"IsTruncated\"]:\n cont_token = response[\"NextContinuationToken\"]\n else:\n break\n except Exception as exc:\n raise Error(\"Error {} occurred while listing objects.\".format(exc))\n return s3_objects", "def list_buckets(self, s3_client=None):\n\n s3_client = s3_client or self.s3_client\n\n try:\n return ([ bucket['Name'] for bucket in s3_client.list_buckets()['Buckets'] ])\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. 
If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n sys.exit(1)\n except botocore.exceptions.ClientError as e:\n print(e)\n sys.exit(1)", "def ls(self, s3uri, **kwargs):\n return self.exec_command('ls %s' % (s3uri,), **kwargs)", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def list(self, path, filename=None, start=None, stop=None, recursive=False, directories=False):\n storageScheme, keys = self.getkeys(\n path, filename=filename, directories=directories, recursive=recursive)\n keys = [storageScheme + \":///\" + key.bucket.name + \"/\" + key.name for key in keys]\n keys.sort()\n keys = select(keys, start, stop)\n return keys", "def blobs(self):\n blobs = pipeline(\n ftype_selector([FILE]),\n fmap(first),\n fmap(self.reverser),\n )(self.root.entries())\n return blobs", "def list_s3_files(bucket, prefix):\n \n s3 = boto3.client('s3')\n\n if type(prefix) != list:\n prefix = [prefix]\n \n # Loop over prefixes:\n file_list = []\n for p in prefix:\n \n # Load one prefix:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p)\n if response['KeyCount'] > 0:\n file_list = file_list + [d['Key'] for d in response['Contents']]\n while response['IsTruncated']:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p, StartAfter=file_list[-1])\n file_list = file_list + [d['Key'] for d in response['Contents']] \n \n return file_list", "def list(number=0):\n buckets = get_buckets(number)\n data = [(\"Name\", \"Versioned\", \"LifeCycle\")]\n col_width = [0, 0, 0]\n rows = []\n for name, bucket in buckets.items():\n v = bucket[\"versioning\"]\n l = bucket[\"lifecycle\"]\n v = v if v else \"Disabled\"\n if l:\n l = json.dumps(l, indent=1)\n else:\n l = \"None\"\n data.append((name, v, l))\n for row in data:\n for i, info in enumerate(row):\n col_width[i] = min(max(len(info) + 2, col_width[i]), 48)\n dashes = tuple((\"-\" * (width - 1) for width in col_width))\n data.insert(1, dashes)\n click.echo(f\"The status of the buckets:\")\n for row in data:\n output = \"\"\n for i in range(3):\n output += row[i].ljust(col_width[i])\n if not VERBOSE:\n click.echo(output)\n logger.info(output)", "def list_buckets(self, regex=\".*\", verbose=False):\n r = re.compile(regex)\n if verbose:\n return [b for b in self.client.list_buckets()['Buckets'] if r.match(b['Name'])]\n else:\n return [b['Name'] for b in self.client.list_buckets()['Buckets'] if r.match(b['Name'])]", "def list_s3(bucket, prefix, ext=None):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' + ext.lstrip('.')\n else:\n ext = ''\n\n for item in s3_bucket.objects.filter(Prefix=prefix):\n key = item.key\n if not key.endswith(ext):\n continue\n\n yield key", "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def list(\n self,\n bucket: str,\n prefix: str=None,\n delimiter: str=None,\n ) -> typing.Iterator[str]:\n raise NotImplementedError()", "def list_buckets_from_project(projectname):\n response = jsonify(\n admin.list_buckets_on_project_by_name(current_app.scoped_session(), projectname)\n )\n return response", "def list_available_tiles(prefix):\n\n files = []\n generator = hls_container_client.list_blobs(name_starts_with=prefix)\n for blob in generator:\n files.append(blob.name)\n return files" ]
[ "0.81295836", "0.80809796", "0.8063129", "0.80104107", "0.79991764", "0.7891442", "0.771796", "0.7614527", "0.757374", "0.7504958", "0.74911094", "0.74169266", "0.73137933", "0.72736335", "0.7269783", "0.72240984", "0.7204592", "0.7182615", "0.7155751", "0.71367675", "0.71151024", "0.7106814", "0.6919984", "0.6898795", "0.6889916", "0.68797797", "0.6870751", "0.6832451", "0.6791812", "0.67580414", "0.67227495", "0.6707942", "0.6677458", "0.66417694", "0.6629878", "0.6588292", "0.6539728", "0.65361226", "0.6506271", "0.6487749", "0.6464164", "0.6443255", "0.64359194", "0.64187723", "0.64157534", "0.6406138", "0.63670814", "0.63432145", "0.6338134", "0.633423", "0.63275063", "0.6287175", "0.62742704", "0.6270394", "0.6270205", "0.624025", "0.6238867", "0.6235499", "0.6227802", "0.62112206", "0.620957", "0.6200909", "0.61903316", "0.61492324", "0.6134529", "0.6131121", "0.61301214", "0.60760695", "0.607269", "0.6048615", "0.6013497", "0.599046", "0.5958408", "0.5957556", "0.5936025", "0.5920029", "0.59067225", "0.5893506", "0.58727837", "0.58573127", "0.58504134", "0.5832183", "0.58315194", "0.5819633", "0.58070934", "0.57894206", "0.5784265", "0.5779386", "0.5759396", "0.5758678", "0.57579935", "0.57562685", "0.57547694", "0.5750166", "0.5731678", "0.57311475", "0.5719248", "0.5712901", "0.5688824", "0.5684758" ]
0.7789013
6
Downloads a blob from the bucket.
def gcs_download_blob(bucket_name, source_blob_name, destination_file_name): storage_client = client #storage.Client() bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(source_blob_name) blob.download_to_filename(destination_file_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob", "def blob_download(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobc = blob.download()\n return blobc", "def download_blob(bucket_name, source_blob_name):\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n return blob.download_as_string().decode()", "def download_blob(url: str) -> io.BytesIO:\n storage_client = storage.Client()\n bucket_name = get_bucket_name(url)\n source_blob_name = get_blob_name(url)\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n f = io.BytesIO(blob.download_as_bytes())\n return f", "def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)", "def download_specific_blob(bucket_name, path_to_storage_file_name, download_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, path_to_storage_file_name)\r\n\r\n # set the path to source file\r\n blob.download_to_filename(download_file_name)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"download blob '{path_to_storage_file_name}' succeed\")\r\n\r\n return None", "def get_blob(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n response = self.s3.get_object(Bucket=bucket_name,\n Key=key)\n return response['Body'].read().decode()", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n try:\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n \n blob.download_to_filename(destination_file_name)\n \n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name)) \n except:\n print(\"User does not have access to that bucket. 
Trying public link:\")\n gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket':bucket_name, 'file':source_blob_name}\n urllib.urlretrieve(gcs_url, destination_file_name)\n print (\"Download complete\")", "def downloadBlob(self, oid, serial):\n\n key = s3_blob_filename(oid, serial)\n\n # Confirm blob cache directory is locked for writes\n cache_filename = self.fshelper.getBlobFilename(oid, serial)\n lock_filename = os.path.join(os.path.dirname(cache_filename), '.lock')\n assert os.path.exists(lock_filename)\n\n # Download\n self.bucket.download_file(key, cache_filename)\n os.chmod(cache_filename, stat.S_IREAD)\n\n # Cache bookkeeping\n self._blob_data_bytes_loaded += os.path.getsize(cache_filename)\n self._check_blob_size(self._blob_data_bytes_loaded)", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))", "def get_blob(self, blob_name):\n return self.bucket.get_blob(blob_name)", "def download(self, bucket, object, filename=None):\n service = self.get_conn()\n downloaded_file_bytes = service \\\n .objects() \\\n .get_media(bucket=bucket, object=object) \\\n .execute()\n\n # Write the file to local file path, if requested.\n if filename:\n write_argument = 'wb' if isinstance(downloaded_file_bytes, bytes) else 'w'\n with open(filename, write_argument) as file_fd:\n file_fd.write(downloaded_file_bytes)\n\n return downloaded_file_bytes", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blobs = storage_client.list_blobs(bucket_name)\n for blob in blobs:\n print(blob.name)\n if (blob.name == source_blob_name):\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n \n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. 
As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name))", "def download_blob(source_blob_name, destination_file_name, bucket_name=\"bts-ml-data\"):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )", "def run(\n self,\n bucket: str = None,\n blob: str = None,\n project: str = None,\n chunk_size: int = None,\n credentials: dict = None,\n encryption_key: str = None,\n request_timeout: Union[float, Tuple[float, float]] = 60,\n ) -> str:\n # create client\n client = get_storage_client(project=project, credentials=credentials)\n\n # retrieve bucket\n bucket = self._retrieve_bucket(\n client=client, bucket=bucket, create_bucket=False\n )\n\n # identify blob name\n blob = self._get_blob(\n bucket,\n blob,\n chunk_size=chunk_size,\n encryption_key=encryption_key,\n )\n # Support GCS < 1.31\n return (\n blob.download_as_bytes(timeout=request_timeout)\n if hasattr(blob, \"download_as_bytes\")\n else blob.download_as_string(timeout=request_timeout)\n )", "def download(bucket, key):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n # do a buffered download\n bytes_io = io.BytesIO()\n client.download_fileobj(bucket, key, bytes_io)\n\n # hope that stuff is not too big, and just return content\n return bytes_io.getvalue()", "def get_blob(self, download_meta):\n blob_id = download_meta['blob_id']\n if isinstance(blob_id, str):\n blob_id = uuid.UUID(blob_id)\n session = self.DBSession()\n blob = session.get(Blob, blob_id) # was session.query(Blob).get(blob_id), rewritten for SA2.0\n return blob.data", "def _retrieve_blob(self, object_key):\n return self.s3_resource.Object(self.CVE_BUCKET, object_key).get()['Body'].read()", "def download(self, bucket_name, file_name, file_path):\n\n self.client.download_file(bucket_name, file_name, file_path)", "def loadS3Blob(self, oid, serial):\n\n # Check if it's already in the cache\n cache_filename = self.fshelper.getBlobFilename(oid, serial)\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n # If not, download from S3...\n # First, we'll create the directory for this oid, if it doesn't exist.\n self.fshelper.createPathForOID(oid)\n\n # OK, it's not here and we (or someone) needs to get it. We\n # want to avoid getting it multiple times. We want to avoid\n # getting it multiple times even accross separate client\n # processes on the same machine. We'll use file locking.\n\n lock = ZEO.ClientStorage._lock_blob(cache_filename)\n try:\n # We got the lock, so it's our job to download it. First,\n # we'll double check that someone didn't download it while we\n # were getting the lock:\n\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n # Actually download the blob. When this function\n # returns, it will have been sent. 
(The receiving will\n # have been handled by the asyncore thread.)\n self.downloadBlob(oid, serial)\n\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n raise ZODB.POSException.POSKeyError(\"No blob file\", oid, serial)\n finally:\n lock.close()", "def download_file(self, bucket_name, object_name, file_name):\n self._client.download_file(bucket_name, object_name, file_name)", "def download_blob(blob, file_group, destination, blob_service, progress_callback):\n\n def _wrap_callback(curr, total):\n if progress_callback:\n progress_callback(curr, total, destination)\n\n blob_service.get_blob_to_path(\n get_container_name(file_group), blob, destination,\n progress_callback=_wrap_callback)", "def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data", "def urlgrab(self, url, filename=None, **kwargs):\n blob_location = \"%s/%s\" % (self.base_path, url)\n self.verbose_logger.info(\"downloading gs://%s/%s to %s\" % (self.bucket.name, blob_location, filename))\n url = url.lstrip('/')\n if not filename:\n filename = url\n\n blob = storage.blob.Blob(name=blob_location,bucket = self.bucket)\n blob.download_to_filename(filename)\n return filename", "def get_raw(key: str, bucket: google.cloud.storage.bucket.Bucket) -> bytes:\n blob = google.cloud.storage.blob.Blob(name=key, bucket=bucket)\n return blob.download_as_string()", "def get_blob_url(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n location = self.s3.generate_presigned_url(\n ClientMethod='get_object',\n ExpiresIn=36*60*60,\n Params={'Bucket': bucket_name, 'Key': key})\n return location", "def _download_from_bucket(self, ext_filename, local_filename, force=False):\n if os.path.exists(local_filename) and not force:\n logging.info('File {} already exists. Not overwriting...'.format(local_filename))\n return\n if os.path.exists(local_filename) and force:\n logging.info('File {} already exists. Overwriting...'.format(local_filename))\n else:\n logging.info('File {} does not exist. 
Downloading...'.format(local_filename))\n\n Path(os.path.dirname(local_filename)).mkdir(parents=True, exist_ok=True)\n\n if self.s3:\n self.bucket.download_file(ext_filename, local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.download_to_filename(local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n except:\n logging.warning('Downloading failed')\n\n i += 1", "def download_file(bucket_name: str, object_name: str, file_path: str):\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.utils import safe_getenv\n\n minio_client = Minio(\n safe_getenv(constants.MINIO_ENDPOINT.value),\n access_key=safe_getenv(constants.MINIO_ACCESS_KEY.value),\n secret_key=safe_getenv(constants.MINIO_SECRET_KEY.value),\n )\n minio_client.fget_object(bucket_name, object_name, file_path)", "def delete_blob(bucket_name, blob_name):\r\n\r\n # initialize client, get bucket, & get blob\r\n _, _, blob = create_client(bucket_name, blob_name)\r\n\r\n # delete blob\r\n blob.delete()\r\n\r\n print(\"blob {} deleted\".format(blob_name))", "def download_file(bucket, key, filename):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n client.download_file(bucket, key, filename)", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def get(self, bucket: str, object_name: str) -> bytes:\n raise NotImplementedError()", "def OpenBlob(self, blob_key):\n return StringIO.StringIO(\n self._blobs[blobstore.BlobKey(unicode(blob_key))])", "def download_object(self, bucket, key, dest_path) -> None:\n self.resource.Bucket(bucket).download_file(key, dest_path)", "def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl", "def download_file(bucket,file_name):\n with open(file_name, 'wb') as f:\n s3.download_fileobj(bucket, file_name,f)\n print(file_name, \": is downloaded\")", "def fetch_and_extract(self, filename):\n # type: (Text) -> None\n\n blob = self.bucket.blob(filename)\n blob.download_to_filename(filename)\n\n with tarfile.open(filename, \"r:gz\") as tar:\n tar.extractall(self.data_dir)", "def download_from_blob():\n block_blob_service = BlockBlobService(account_name='project3twitter',\n account_key='<YOUR_ACCOUNT_KEY>')\n container_name = 'project3'\n block_blob_service.set_container_acl(container_name, public_access=PublicAccess.Container)\n\n # actual download\n block_blob_service.get_blob_to_path(container_name, 'word-count.txt', 'resource/word-count.txt')", "def delete_blob(bucket_name, blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n blob.delete()\n\n print('Blob {} deleted.'.format(blob_name))", "def download_file_from_s3_public_bucket(bucket, object, output_file):\n botocore_config = Config(signature_version=UNSIGNED)\n s3_client = boto3.client(\"s3\", config=botocore_config)\n s3_client.download_file(bucket, object, output_file)", "def _download_file(bucket: str, key: str) -> str:\n tmp_file_name = f\"/tmp/logs\"\n\n try:\n with open(tmp_file_name, \"wb\") as data:\n s3cl.download_fileobj(bucket, key, data)\n except 
Exception as e:\n print(type(e).__name__, e)\n f = open(tmp_file_name, \"w\")\n f.write(\"\")\n f.close()\n try:\n with gzip.open(tmp_file_name, mode=\"rt\") as f:\n x = f.read()\n return x\n except Exception as e:\n print(type(e).__name__, e, key)\n return \"\"", "def loadBlob(self, oid, serial):\n start = time.time()\n try:\n blob_filename = self.storage.loadBlob(oid, serial)\n logger.debug('Fetched blob from ZEO in %ss' % (time.time() - start))\n except ZODB.POSException.POSKeyError:\n blob_filename = self.loadS3Blob(oid, serial)\n logger.debug('Fetched blob from S3 in %ss' % (time.time() - start))\n return blob_filename", "def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass", "def download(self, bucket_name=None,\n object_key=None,\n dest=None):\n\n if bucket_name == None or \\\n object_key == None or \\\n dest == None:\n u_print(\" Error - argument is missing\")\n\n u_print_d('S3.download() - bucket=[{}] key=[{}] dest=[{}]'.format(bucket_name,\n object_key,\n dest))\n return self.s3.Object(bucket_name, object_key).download_file(dest)", "def get_bytes(bucket: str, key: str) -> bytes:\n logger.debug(f'Reading from s3://{bucket}/{key}')\n response = client().get_object(Bucket=bucket, Key=key)\n return response['Body'].read()", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "async def fetch_file(self, download_url: str) -> bytes:\n log.debug(f\"Fetching file from branding repository: '{download_url}'.\")\n\n async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response:\n if response.status != 200:\n raise RuntimeError(f\"Failed to fetch file due to status: {response.status}\")\n\n log.debug(\"Fetch successful, reading payload.\")\n return await response.read()", "def download(self, file_url):\n url = self.base_url + \"/storage-service/cloud-storage/s3/file/download?url={0}\".format(file_url)\n\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response", "def get_blob(uuid, path=''):\n check_bundles_have_read_permission(local.model, request.user, [uuid])\n bundle = local.model.get_bundle(uuid)\n\n target_info = local.download_manager.get_target_info(uuid, path, 0)\n if target_info is None:\n abort(httplib.NOT_FOUND, 'Not found.')\n\n # Figure out the file name.\n if not path and bundle.metadata.name:\n filename = bundle.metadata.name\n else:\n filename = target_info['name']\n\n if target_info['type'] == 'directory':\n # Always tar and gzip directories.\n filename = filename + '.tar.gz'\n fileobj = local.download_manager.stream_tarred_gzipped_directory(uuid, path)\n elif target_info['type'] == 'file':\n if not zip_util.path_is_archive(filename) and request_accepts_gzip_encoding():\n # Let's gzip to save bandwidth. 
The browser will transparently decode\n # the file.\n filename = filename + '.gz'\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=True)\n else:\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=False)\n else:\n # Symlinks.\n abort(httplib.FORBIDDEN, 'Cannot download files of this type.')\n \n # Set headers.\n mimetype, _ = mimetypes.guess_type(filename, strict=False)\n response.set_header('Content-Type', mimetype or 'text/plain')\n if zip_util.get_archive_ext(filename) == '.gz' and request_accepts_gzip_encoding():\n filename = zip_util.strip_archive_ext(filename)\n response.set_header('Content-Encoding', 'gzip')\n else:\n response.set_header('Content-Encoding', 'identity')\n response.set_header('Content-Disposition', 'filename=\"%s\"' % filename)\n\n return fileobj", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def _progress_blob(self, bucket, blob_name, progress_callback):\n return _ProgressBlob(\n name=blob_name,\n bucket=bucket,\n progress_callback=progress_callback\n )", "def download_from_s3(s3_resource, photo):\n try:\n bucket, key = photo.replace(\"s3://\", \"\").split(\"/\", 1)\n local_file = os.path.basename(photo)\n except ValueError as err:\n logger.exception(\"Couldn't get S3 info for %s: %s\", photo)\n raise\n\n try:\n logger.info(\"Downloading %s\", photo)\n s3_resource.Bucket(bucket).download_file(key, local_file)\n except ClientError:\n logger.exception(\"Couldn't download %s from S3.\", photo)\n raise\n\n return local_file", "def downloadObject(bucket:str, object:str, region:str, path:Path) -> None:\n client = boto3.client('s3', region_name=region)\n try:\n with open(str(path), 'wb') as data:\n client.download_fileobj(Bucket=bucket, Object=object, Data=data)\n except Exception as e:\n raise ObjectDownloadError(e)", "def DownloadFile(self, gcs_file_name, io_base):\n bucket, bucket_path = self._ParseBucketAndPath(gcs_file_name)\n\n # Check the size of the remote file. If it's empty, we have to return early\n # because the chunked downloader will crash. There aren't any contents to\n # retrieve in that case, anyway.\n object_data = self._RunWithRetries(\n self._service.objects().get(bucket=bucket, object=bucket_path).execute,\n self._CommonErrorMatcher)\n if ('name' not in object_data or object_data['name'] != bucket_path\n or 'size' not in object_data):\n raise CloudStorageApiError('Object data for %s is malformed.' 
%\n GcsPath(bucket, bucket_path))\n if int(object_data['size']) == 0:\n return\n\n request = self._service.objects().get_media(bucket=bucket,\n object=bucket_path)\n downloader = gapi_http.MediaIoBaseDownload(\n io_base, request, chunksize=1024*1024)\n done = False\n while not done:\n # The first return value indicates download progress, which we won't do\n # anything fancy with for now.\n _, done = self._RunWithRetries(downloader.next_chunk,\n self._CommonErrorMatcher)", "def generate_download_signed_url_v4(bucket_name, blob_name):\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n url = blob.generate_signed_url(\n version=\"v4\",\n # This URL is valid for 15 minutes\n expiration=datetime.timedelta(minutes=30),\n # Allow GET requests using this URL.\n method=\"GET\",\n )\n\n return url", "def download_from_gcs(gcs_uri, target_path):\n bucket, blob_name = gcs_uri.replace('gs://', '').split('/', 1)\n client = storage.Client(project='embdatalab')\n bucket = client.get_bucket(bucket)\n prefix = blob_name.split('*')[0]\n unzipped = open(target_path, 'w')\n cmd = \"gunzip -c -f %s >> %s\"\n for blob in bucket.list_blobs(prefix=prefix):\n with tempfile.NamedTemporaryFile(mode='rb+') as f:\n logger.info(\"Downloading %s to %s\" % (blob.path, f.name))\n blob.chunk_size = 2 ** 30\n blob.download_to_file(f)\n f.flush()\n f.seek(0)\n subprocess.check_call(\n cmd % (f.name, unzipped.name), shell=True)\n return unzipped.name", "def download_file(cls, uri, fobj):\n # Breaks the URI into usable componenents.\n values = get_values_from_media_uri(uri)\n\n conn = cls._get_aws_s3_connection(values['username'],\n values['password'])\n bucket = conn.get_bucket(values['host'])\n key = bucket.get_key(values['path'])\n\n logger.debug(\"S3Backend.download_file(): \" \\\n \"Downloading: %s\" % uri)\n\n try:\n key.get_contents_to_file(fobj)\n except AttributeError:\n # Raised by ResumableDownloadHandler in boto when the given S3\n # key can't be found.\n message = \"The specified input file cannot be found.\"\n raise InfileNotFoundException(message)\n\n logger.debug(\"S3Backend.download_file(): \" \\\n \"Download of %s completed.\" % uri)\n return fobj", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def do_part_download(args):\r\n bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries = args\r\n conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n conn.is_secure = secure\r\n\r\n # Make the S3 request\r\n resp = conn.make_request(\"GET\", bucket=bucket_name,\r\n key=key_name, headers={'Range':\"bytes=%d-%d\" % (min_byte, max_byte)})\r\n\r\n # Open the target file, seek to byte offset\r\n fd = os.open(fname, os.O_WRONLY)\r\n logger.debug(\"Opening file descriptor %d, seeking to %d\" % (fd, min_byte))\r\n os.lseek(fd, min_byte, os.SEEK_SET)\r\n\r\n chunk_size = min((max_byte-min_byte), split*1024*1024)\r\n logger.debug(\"Reading HTTP stream in %dM chunks\" % (chunk_size/1024./1024))\r\n t1 = time.time()\r\n s = 0\r\n try:\r\n while True:\r\n data = resp.read(chunk_size)\r\n if data == \"\":\r\n break\r\n 
os.write(fd, data)\r\n s += len(data)\r\n t2 = time.time() - t1\r\n os.close(fd)\r\n s = s / 1024 / 1024.\r\n logger.debug(\"Downloaded %0.2fM in %0.2fs at %0.2fMBps\" % (s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries)", "def download(\n bucket: str, key: str, file_path: str, session: Optional[boto3.Session] = None\n) -> str:\n s3_client = _get_client(session)\n\n LOGGER.info(\"downloading s3://%s/%s to %s...\", bucket, key, file_path)\n s3_client.download_file(Bucket=bucket, Key=key, Filename=file_path)\n return file_path", "def download_fileobj(Fileobj=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def get(self, key):\n key = str(urllib.unquote(key)).strip()\n logging.debug('key is %s', key)\n blob_info = blobstore.BlobInfo.get(key)\n self.send_blob(blob_info)", "def load(self, bucket, key):\n\n bucket = self._build_bucket_resource(bucket)\n\n with io.BytesIO() as stream:\n bucket.download_fileobj(key, stream)\n stream.seek(0)\n\n wrapper = io.TextIOWrapper(stream, encoding='utf-8')\n # Preserve the original order\n return json.load(wrapper, object_pairs_hook=collections.OrderedDict)", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n print(storage_client.current_batch)\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n # print(len([1 for blob in blobs]))\n for blob in blobs:\n print(blob.name)", "def get_blob_content(container_name: str, blob_path: str) -> str:\n global BLOB_SERVICE_CLIENT\n # TODO: Should add retry policy here\n if not BLOB_SERVICE_CLIENT:\n logging.info(\n f\"{HEADER} Initialize blob service client for {DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL}\")\n BLOB_SERVICE_CLIENT = BlobServiceClient(DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL,\n credential=DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN)\n blob_client = BLOB_SERVICE_CLIENT.get_blob_client(container=container_name, blob=blob_path)\n content = blob_client.download_blob().content_as_text()\n return content", "def load_pickle(self, bucket, key):\n\n with BytesIO() as obj_buffer:\n self._s3.Bucket(bucket).download_fileobj(key, obj_buffer)\n obj_buffer.seek(0)\n obj = pickle.load(obj_buffer)\n\n return obj", "def copy_blob(bucket_name, blob_name, destination_bucket_name, destination_blob_name):\r\n\r\n # initialize client, get bucket, & get blob\r\n storage_client, source_bucket, source_blob = create_client(bucket_name, blob_name)\r\n\r\n # set destination bucket name\r\n destination_bucket = storage_client.bucket(destination_bucket_name)\r\n\r\n # copy blob\r\n blob_copy = source_bucket.copy_blob(\r\n source_blob, destination_bucket, destination_blob_name\r\n )\r\n\r\n print(\r\n \"blob {} in bucket {} copied to blob {} in bucket {}.\".format(\r\n source_blob.name,\r\n 
source_bucket.name,\r\n blob_copy.name,\r\n destination_bucket.name,\r\n )\r\n )", "def get_file(object_name: str, **kwargs) -> HTTPResponse:\n data = client.get_object(DATASETS_BUCKET, object_name, **kwargs)\n return data", "def get_object(self, bucket_name, key, stream=False, extra_get_args={}):\n url = self.__key_url(bucket_name, key)\n res = self.infinispan_client.get(url, headers=self.headers, auth=self.basicAuth)\n data = res.content\n return data", "async def read(self, size=-1):\n # read the object using the bucket and path already determined in\n # __init__, and using the connection object\n try:\n # get the file size first\n file_size = await self._getsize()\n if size== -1:\n range_start = 0\n range_end = file_size\n range_size = file_size\n else:\n range_start = self._seek_pos\n range_end = self._seek_pos+size-1\n if range_end > file_size:\n range_end = file_size-1\n range_size = range_end-range_start+1\n\n # if multipart download is not supported\n if not self._multipart_download:\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n body = s3_object['Body']\n data = await body.read()\n # if the file is smaller than the MAXIMUM_PART_SIZE\n elif (range_size < self._part_size):\n # the requested range is the full file, it is fastest to\n # not specify the range\n if (range_start == 0 and range_size == file_size):\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n # a portion of the file is requested\n else:\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n Range = 'bytes={}-{}'.format(\n range_start, range_end\n )\n )\n body = s3_object['Body']\n data = await body.read()\n # multipart download version\n else:\n \"\"\"Use range get to split up a file into the MAXIMUM_PART_SIZE\n and download each part asynchronously.\"\"\"\n # calculate the number of necessary parts\n n_parts = int(range_size / self._part_size + 1)\n # don't go above the maximum number downloadable\n if n_parts > self._max_parts:\n n_parts = self._max_parts\n # (re)calculate the download size\n part_size = float(range_size) / n_parts\n # create the tasks and assign the return data buffer\n tasks = []\n data_buf = io.BytesIO()\n\n for p in range(0, n_parts):\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._read_partial_file(\n p, part_size\n ))\n tasks.append(task)\n # wait for all the tasks to finish\n results = await asyncio.gather(*tasks)\n # read each chunk of data and write into the global buffer\n for r in results:\n data_buf.write(r)\n r = None # indicate ready for garbage collection\n data_buf.seek(0)\n data = data_buf.read()\n\n except ClientError as e:\n raise IOException(\n \"Could not read from object {} {}\".format(self._path, e)\n )\n except AttributeError as e:\n self._handle_connection_exception(e)\n return data", "def new_blob(self, blob_name):\n return storage.Blob(blob_name, self.bucket)", "def download_file(file_name, bucket_name, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Download file from bucket\n response = s3.download_file(Bucket=bucket_name, Key=object_name, Filename=file_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def _do_retrieve(bucket_name, key_path, number_retries=DEFAULT_S3_RETRIES):\n try:\n return 
conn.get_object(Bucket=bucket_name, Key=key_path, ResponseContentType='string')\n except Exception:\n if number_retries > 0:\n print(\"s3_retrieve failed with incomplete read, retrying on %s\" % key_path)\n return _do_retrieve(bucket_name, key_path, number_retries=number_retries - 1)\n raise", "def get_file_s3(bucket, key):\n \n client = boto3.client('s3')\n return client.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')", "def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file", "def delete_blob(blob_name):\n # bucket_name = \"your-bucket-name\"\n # blob_name = \"your-object-name\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.delete()\n print('Blob {} deleted.'.format(blob_name))", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n list_blobs = [blob.public_url for blob in blobs]\n return list_blobs", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def _torrent_for_blob(blob, is_public):\n # Make sure the storage has a size.\n if not blob.compressed_size:\n abort(404)\n\n # Lookup the torrent information for the storage.\n torrent_info = registry_model.get_torrent_info(blob)\n if torrent_info is None:\n abort(404)\n\n # Lookup the webseed path for the storage.\n webseed = storage.get_direct_download_url(\n blob.placements, blob.storage_path, expires_in=app.config[\"BITTORRENT_WEBSEED_LIFETIME\"]\n )\n if webseed is None:\n # We cannot support webseeds for storages that cannot provide direct downloads.\n exact_abort(501, \"Storage engine does not support 
seeding.\")\n\n # Load the config for building torrents.\n torrent_config = TorrentConfiguration.from_app_config(instance_keys, app.config)\n\n # Build the filename for the torrent.\n if is_public:\n name = public_torrent_filename(blob.uuid)\n else:\n user = get_authenticated_user()\n if not user:\n abort(403)\n\n name = per_user_torrent_filename(torrent_config, user.uuid, blob.uuid)\n\n # Return the torrent file.\n torrent_file = make_torrent(\n torrent_config,\n name,\n webseed,\n blob.compressed_size,\n torrent_info.piece_length,\n torrent_info.pieces,\n )\n\n headers = {\n \"Content-Type\": \"application/x-bittorrent\",\n \"Content-Disposition\": \"attachment; filename={0}.torrent\".format(name),\n }\n\n return make_response(torrent_file, 200, headers)", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def list_blobs(bucket):\n bucket = default_bucket if bucket is None else bucket\n bucket_name = bucket if isinstance(bucket, str) else bucket.name\n blobs = gcs.list_blobs(bucket_name)\n return blobs", "def download_file_from_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.download_file(Bucket=bucket, Key=key, Filename=local_file_name)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` downloaded from ICOS and saved locally as `{}`.'.format(key, local_file_name))", "def download(self, key, filename):\n self.resource.Bucket(self.bucket).download_file(key, filename)\n return filename", "def download_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n try:\n lwr_AIES.download_file(Key=s3_path, Filename=local_path)\n print(\"Download successful\")\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n return blobs", "def upload_to_bucket(blob_name, file, bucket_name):\n\n # Explicitly use service account credentials by specifying the private key\n # file.\n storage_client = storage.Client.from_service_account_json('creds.json')\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.upload_from_file(file)\n\n # returns a public url\n return blob.public_url", "def get_file_download(self, bucket_id, file_id):\n\n \n path = '/storage/buckets/{bucketId}/files/{fileId}/download'\n params = {}\n if bucket_id is None:\n raise AppwriteException('Missing required parameter: \"bucket_id\"')\n\n if file_id is None:\n raise AppwriteException('Missing required parameter: \"file_id\"')\n\n path = path.replace('{bucketId}', bucket_id)\n path = path.replace('{fileId}', file_id)\n\n\n return self.client.call('get', path, {\n 'content-type': 'application/json',\n }, params)", "def download_from_s3(self, filename: str, filename_output: Optional[str] = None) -> str:\n if self.aws_access_key_id is None:\n raise Exception(\n 'To use `download_from_s3` you need to pass '\n '`aws_access_key_id` and '\n '`aws_secret_access_key`'\n )\n\n s3 = boto3.client('s3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key)\n\n # Progress bar\n meta_data = s3.head_object(Bucket=self.bucket_name, Key=filename)\n size = 
int(meta_data.get('ContentLength', 0))\n progress_bar = self._progress(size)\n\n # Downloading file\n s3.download_file(self.bucket_name, filename,\n filename if filename_output is None else filename_output,\n Callback=progress_bar)", "def download(self, bucket_name, key_name, fname):\n dname = os.path.dirname(fname)\n if dname and not os.path.exists(dname):\n os.makedirs(dname)\n bucket = self.s3_.get_bucket(bucket_name)\n key = bucket.get_key(key_name)\n return key.get_contents_to_filename(fname)", "def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None", "def fetch_and_extract(self, filename):\n # type: (Text) -> None\n\n with io.open(filename, 'wb') as f:\n self.bucket.download_fileobj(filename, f)\n with tarfile.open(filename, \"r:gz\") as tar:\n tar.extractall(self.data_dir)", "def get_object(self, container_name, object_name, download_path):\n obj = self.client.get_object(container_name, object_name)\n with open(download_path + object_name, 'w') as test_file:\n test_file.write(obj[1])\n return True", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(handle):\n storage = get_storage()\n # FIXME: 404 if not found or invalid?\n return storage.route(handle)", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)" ]
[ "0.8135551", "0.8126195", "0.7890022", "0.78339165", "0.7721649", "0.77189684", "0.7649907", "0.7573001", "0.7502306", "0.74736196", "0.74572617", "0.7345375", "0.73441243", "0.72784907", "0.7213689", "0.7128273", "0.7065729", "0.6937288", "0.6928252", "0.6894445", "0.68285346", "0.6771118", "0.6566794", "0.650527", "0.6438687", "0.64302903", "0.6416173", "0.6409029", "0.6397378", "0.63945675", "0.638665", "0.63407785", "0.63147837", "0.62715214", "0.6262548", "0.624161", "0.6205678", "0.61963224", "0.6155657", "0.61530966", "0.61245006", "0.611263", "0.60940665", "0.6090566", "0.6088601", "0.6071537", "0.60596937", "0.6040488", "0.6040488", "0.6002172", "0.5999373", "0.59909165", "0.59675324", "0.5923368", "0.59098816", "0.5894061", "0.5890488", "0.58899784", "0.58667856", "0.5866474", "0.58513284", "0.5847841", "0.582473", "0.5798656", "0.57645994", "0.5751948", "0.5749541", "0.5737763", "0.5696356", "0.5694621", "0.56945485", "0.56900066", "0.5687712", "0.5679901", "0.5671524", "0.5670936", "0.56703305", "0.5665057", "0.56610024", "0.56574565", "0.5651831", "0.56483865", "0.5643131", "0.5636791", "0.56332004", "0.5617774", "0.56170154", "0.5614051", "0.56115985", "0.56067723", "0.56050444", "0.5587371", "0.55863833", "0.5547073", "0.55434656", "0.5540437", "0.55376863", "0.5533472", "0.55327046", "0.552546" ]
0.70821893
16
Current user will place order
def post(self, request, format=None): inventory_id = request.data.get('inventory_id', None) user_id = None if hasattr(request, 'user'): user_id = request.user.id if inventory_id is not None and user_id is not None: try: inventory_item = self.nv_models.Inventory.objects.get( pk=inventory_id) except self.nv_models.Inventory.DoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) try: user = self.nv_models.ExtendedUser.objects.get(pk=user_id) except self.nv_models.ExtendedUser.DoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) try: new_data = {} new_data['inventory'] = inventory_item new_data['customer'] = user new_order = self.nv_models.InventoryOrder(**new_data) new_order.save() return Response(self.InventoryOrderSerializer(new_order).data) except Exception as e: return Response(status=status.HTTP_400_BAD_REQUEST) return Response(status=status.HTTP_400_BAD_REQUEST)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def place_order(self):\n\n order_total = self.get_order_total()\n\n if self.person.can_afford(order_total):\n print 'This person is stinkin rich!'\n else:\n print \"No soup for you!\"", "def test_order_by_user(self):\n self.fill_session_cart()\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(OrderInfo.objects.get().user,\n USER_MODEL.objects.get())", "def place_buy_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = request.form[\"trader_id\"]\n self.market.place_buy_order(trader_id, price, stocks)\n return \"\"", "def place(self):\n\n return super().place(self._order_params)", "def test_process_order_adds_coins_to_users_account_if_coins_in_order(self):\n self.client.force_login(User.objects.get_or_create(username=\"coinsuser\", email=\"coinsuser@email.com\")[0])\n\n current_user = User.objects.get(username=\"coinsuser\")\n\n request = HttpRequest()\n engine = import_module(settings.SESSION_ENGINE)\n session_key = None\n request.session = engine.SessionStore(session_key)\n\n # below two lines of code from https://stackoverflow.com/questions/11938164/why-dont-my-django-unittests-know-that-messagemiddleware-is-installed\n # fixes bug where test fails because unitest\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n\n random_coins_store_item = choice(StoreItem.objects.filter(is_coins=True))\n cart_add(request, random_coins_store_item.id)\n\n process_order(request, current_user)\n\n current_users_coins = UserCoins.objects.get(user=current_user).coins\n self.assertEqual(current_users_coins, random_coins_store_item.coins_amount)", "def slot_userorder(self, gox, (price, volume, typ, oid, status)):\r\n pass", "def place_order(self,name,address,quantity,contact):\n self.name = name\n self.address = address\n self.quantity = quantity\n self.contact = contact\n self.event = \"user_place_order\"\n self.query_1 = \"SELECT meal_price FROM menu WHERE meal_name=%s\"\n self.input_1 = (self.name,)\n self.query_2 = \"\"\"INSERT INTO public.orders (order_price,order_delivery_address,order_quantity,\n order_contact,order_status,user_id, meal_name) VALUES (%s,%s,%s,%s,%s,%s,%s)\"\"\"\n self.message = \"Order placed successfully.\"\n self.error = \"Unable to place order. 
The meal is not available\"", "def on_order(self, order: OrderData):\n # print(\"on_order\")\n # print(order)\n pass", "def place_order(self, **kwargs):\r\n create_options = self._generate_create_dict(**kwargs)\r\n return self.client['Product_Order'].placeOrder(create_options)", "def place_order(self, market: Market, order) -> bool:\n runner_context = self.get_runner_context(*order.lookup)\n if self.validate_order(runner_context, order):\n runner_context.place()\n market.place_order(order)\n return True\n else:\n return False", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def on_order(self, order: OrderData):\n pass", "def get_one_user_orders(self,user_id):\n\n sql = \"SELECT * FROM parcel_orders WHERE user_id='{}'\".format(user_id)\n self.db_object.cursor.execute(sql)\n placed_orders = self.db_object.cursor.fetchall()\n return placed_orders", "def test_order_from_user(self):\n new_user = self.create_test_user()\n new_order = OrderInfo.objects.create(user=new_user)\n self.assertEqual(new_order.user, new_user)", "def PlaceOrder(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_place_one_order(self):\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 1)\n self.assertEqual(self.orders_list.orders_list[0].order_id, 0)", "def user_open_orders(request):\n qset = Order.objects.filter(\n receiver=request.user\n ).order_by(\n '-date_placed'\n )\n for order in qset:\n min_bid = get_min_bid(order)\n if not min_bid:\n order.min_bid = \"No current bids\"\n else:\n order.min_bid = min_bid\n\n data_dict = {}\n data_dict.update({ k : v.value\n for (k,v)\n in OrderAction.Action._member_map_.items()\n })\n data_dict['orders'] = qset\n\n return render(request, 'friendship/user_open_orders.html', data_dict)", "def create_order():", "def order(self, request):\n is_auth = request.session.get(\"is_auth\", False)\n if not is_auth:\n return HttpResponseRedirect('/crisis')\n\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_auth\": is_auth,\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"entity_list\": ENTITY,\n \"parts_list\": PARTS,\n \"detail_info_list\": DETAIL_INFO})\n\n if \"priority\" not in context:\n priority = {}\n for item in ARMY:\n priority.update({item: 1})\n context.update({\"priority\": priority})\n\n if context.get(\"is_run\", False):\n context.update({\"left_time\": self.utils.get_remaining_time(uid),\n \"order\": self.utils.get_current_unit_order(uid)})\n \"\"\" Context Example\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_run\": False,\n \"is_auth\": is_auth,\n \"resource\": {\"money\": 100, \"food\": 200, \"fuel\": 300},\n \"entity\": {\"armor_composite\": 1, \"armor_plate\": 2, \"control_block\": 3,\n \"gun_receiver\": 4, \"kevlar_fiber\": 5, \"laser_aimer\": 6,\n \"powder_charge\": 7, \"rare_item\": 8, \"tnt_charge\": 9},\n \"parts\": {\"artillery_armor\": 1, \"artillery_chassis\": 2, \"artillery_shell\": 3, \"detonator\": 4,\n \"gunner_armor\": 5, \"gunner_gun\": 6, \"jeep_armor\": 7, \"jeep_gun\": 8, \"sniper_armor\": 9,\n \"sniper_gun\": 10, \"soldier_gun\": 11, \"tank_chassis\": 12, \"thrower_armor\": 13,\n \"thrower_gun\": 14, \"wave_emitter\": 15},\n 'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 
'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 'artillery_cassete': 0, 'sniper': 3}\n }\n \"\"\"\n\n if request.method == \"POST\":\n if \"start\" in request.POST:\n order, priority = {}, {}\n data = dict(request.POST)\n\n for item in ARMY:\n try:\n count = int(data.get(item, [''])[0])\n except:\n count = 0\n try:\n prior = int(data.get(\"%s_priority\" % item, [''])[0])\n except:\n prior = 1\n order.update({item: count})\n priority.update({item: prior})\n\n context.update({\"is_run\": True,\n \"order\": order,\n \"priority\": priority,\n \"left_time\": self.utils.get_remaining_time(uid)})\n\n RUNNING_INFO.update({uid: context})\n self.utils.start_gather(uid, context)\n elif \"stop\" in request.POST:\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_run\": False, \"left_time\": \"00:00:00\"})\n RUNNING_INFO.update({uid: context})\n self.utils.stop_gather(uid)\n\n return render_to_response(\"crisis/order.html\",\n context,\n context_instance=RequestContext(request))", "def test_make_order_by_authorised_user(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(result[\"message\"], \"Order succesfully posted\")\n\t\tself.assertEqual(response.status_code, 201)", "def update_order():", "def update_order():", "def save(self, *args, **kwargs):\n self.order_total = self.membership.price\n if not self.order_number:\n self.order_number = self._generate_order_number()\n super().save(*args, **kwargs)", "def cook_order(request):\n\torder_id = request.GET.get('order_id', 0)\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\n\tif cs.current_order is None:\n\t\tcs.current_order = Order.objects.get(id=order_id)\n\t\tcs.current_order.status = 'cooking'\n\t\tcs.current_order.tikchen = request.user.username\n\t\tcs.current_order.save()\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")", "def create_order(request, order, transaction_id):\n\n\n order.transaction_id = transaction_id\n print transaction_id\n #order.ip_address = request.META.get('REMOTE_ADDR')\n order.user = None\n #if request.user.is_authenticated():\n # order.user = request.user\n order.status = Order.SUBMITTED\n\n DBSession.add(order)\n\n\n if order:\n \"\"\" if the order save succeeded \"\"\"\n cart_items = cart.get_cart_items(request).all()\n print \"The items in the cart are: \", len(cart_items)\n\n for ci in cart_items:\n \"\"\" create order item for each cart item \"\"\"\n\n print \"The product is \", ci.product\n oi = OrderItem()\n oi.order_id = order.id\n oi.order = order\n oi.quantity = ci.quantity\n print \"The product id is \", ci.product.id\n oi.product_id = ci.product.id\n oi.product = ci.product\n\n oi.price = ci.price # now using @property\n DBSession.add(oi)\n\n # all set, clear the cart\n cart.empty_cart(request)\n\n ## save profile info for future orders\n #if request.user.is_authenticated():\n # from ecomstore.accounts import profile\n #\n # profile.set(request)\n\n return order", "def place_order(self, custom_id=None, **params):\n self.conn.send('newOrder', custom_id=custom_id, **params)", "def order(self, typ, price, volume):\r\n self.count_submitted += 1\r\n self.client.send_order_add(typ, price, 
volume)", "def make_order(self) -> Order:\n return Order.objects.create(self.user, customer=self)", "def place(self, order_params):\n\n # Prevent multiple invocations with the same OID.\n if self.oid() is not None:\n return self.oid()\n\n # Common params across all orders\n # https://docs.gdax.com/?python#orders\n data = {\n 'side': self.__side,\n 'type': self.__order_type,\n 'product_id': self.__product,\n }\n data.update(order_params)\n\n log.info('placing ORDER')\n self.__resp = httpapi.post(\n common.api_url + 'orders',\n data=json.dumps(data),\n auth=common.auth,\n )\n\n return self.oid(), self.__resp", "def place_order(creator, **kwargs):\n if \"shipping_method\" not in kwargs:\n kwargs[\"shipping_method\"] = Free()\n\n shipping_charge = kwargs[\"shipping_method\"].calculate(kwargs[\"basket\"])\n\n kwargs[\"total\"] = calculators.OrderTotalCalculator().calculate(\n basket=kwargs[\"basket\"],\n shipping_charge=shipping_charge,\n surcharges=kwargs[\"surcharges\"],\n )\n kwargs[\"shipping_charge\"] = shipping_charge\n\n return creator.place_order(**kwargs)", "async def place_order(request: web.Request, body) -> web.Response:\n body = Order.from_dict(body)\n return web.Response(status=200)", "def create(self):\n return self.account.create_order(self)", "def request_orders(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/orders\", {}, \"orders\")\r\n else:\r\n self.send_signed_call(\"private/orders\", {}, \"orders\")", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def _on_op_private_user_order(self, msg):\r\n order = msg[\"user_order\"]\r\n oid = order[\"oid\"]\r\n\r\n # there exist 3 fundamentally different types of user_order messages,\r\n # they differ in the presence or absence of certain parts of the message\r\n\r\n if \"status\" in order:\r\n # these are limit orders or market orders (new or updated).\r\n #\r\n # we also need to check whether they belong to our own gox instance,\r\n # since they contain currency this is easy, we compare the currency\r\n # and simply ignore mesages for all unrelated currencies.\r\n if order[\"currency\"] == self.curr_quote and order[\"item\"] == self.curr_base:\r\n volume = int(order[\"amount\"][\"value_int\"])\r\n typ = order[\"type\"]\r\n status = order[\"status\"]\r\n if \"price\" in order:\r\n # these are limit orders (new or updated)\r\n price = int(order[\"price\"][\"value_int\"])\r\n else:\r\n # these are market orders (new or updated)\r\n price = 0\r\n self.signal_userorder(self, (price, volume, typ, oid, status))\r\n\r\n else:\r\n # these are remove messages (cancel or fill)\r\n # here it is a bit more expensive to check whether they belong to\r\n # this gox instance, they don't carry any other useful data besides\r\n # the order id and the remove reason but since a remove message can\r\n # only affect us if the oid is in the owns list already we just\r\n # ask the orderbook instance whether it knows about this order\r\n # and ignore all the ones that have unknown oid\r\n if self.orderbook.have_own_oid(oid):\r\n # they don't contain a status field either, so we make up\r\n # our own status string to make it more useful. It will\r\n # be \"removed:\" followed by the reason. 
Possible reasons are:\r\n # \"requested\", \"completed_passive\", \"completed_active\"\r\n # so for example a cancel would be \"removed:requested\"\r\n # and a limit order fill would be \"removed:completed_passive\".\r\n status = \"removed:\" + order[\"reason\"]\r\n self.signal_userorder(self, (0, 0, \"\", oid, status))", "def order(self, order):\n self._order = order", "def subscribe_user_orders(self, update_handler):\n pass", "def on_order(self, order: OrderData):\n\n if order.vt_orderid not in (self.short_orders + self.long_orders):\n return\n\n self.pos_calculator.update_position(order)\n\n self.current_pos = self.pos_calculator.pos\n self.avg_price = self.pos_calculator.avg_price\n\n if order.status == Status.ALLTRADED:\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n self.trade_count += 1\n\n short_price = order.price + self.step_price\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if len(self.long_orders) < self.max_open_orders:\n long_price = order.price - self.step_price * self.max_open_orders\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n self.trade_count += 1\n long_price = order.price - self.step_price\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if len(self.short_orders) < self.max_open_orders:\n short_price = order.price + self.step_price * self.max_open_orders\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.put_event()", "def place_order(id):\n data = services.validate_order(id, request.json, current_user)\n if \"error\" in data:\n return jsonify({\"message\": data[\"message\"]}), HTTPStatus.BAD_REQUEST\n order_key = f\"PENDING_ORDER#{current_user.id}#{data['event'].id}\"\n session[order_key] = [\n {\n \"package_id\": package.id,\n \"event_id\": data[\"event\"].id,\n \"sponsor_id\": current_user.id,\n \"price\": float(package.price),\n \"name\": package.name\n }\n for package in data[\"packages\"]\n ]\n session[\"user\"] = \"Facebook\"\n return jsonify({\"url\": url_for(\"payments.checkout\", event_id=data[\"event\"].id)})", "def test_order_view_permissions(client, user):\n random_user = UserFactory.create(is_staff=False, is_superuser=False)\n order = OrderFactory.create(user=user)\n client.force_login(random_user)\n resp = client.get(reverse(\"order-api\", kwargs={\"pk\": order.id}))\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN\n order.user = random_user\n order.save()\n resp = client.get(reverse(\"order-api\", kwargs={\"pk\": order.id}))\n assert resp.status_code == statuses.HTTP_200_OK", "def set_momo_order_checkout(request, payment_mean, *args, **kwargs):\n service = get_service_instance()\n config = service.config\n if getattr(settings, 'DEBUG', False):\n order = parse_order_info(request)\n else:\n try:\n order = parse_order_info(request)\n except:\n return HttpResponseRedirect(reverse('shopping:checkout'))\n order.retailer = service\n order.payment_mean = payment_mean\n order.save() # Save first to 
generate the Order id\n order = Order.objects.get(pk=order.id) # Grab the newly created object to avoid create another one in subsequent save()\n\n member = request.user\n if member.is_authenticated():\n order.member = member\n else:\n order.aotc = generate_tx_code(order.id, order.anonymous_buyer.auto_inc)\n\n order.rcc = generate_tx_code(order.id, config.rel_id)\n order.save()\n request.session['object_id'] = order.id\n request.session['amount'] = order.total_cost", "def place_order(\n self,\n basket,\n total,\n shipping_method,\n shipping_charge,\n user=None,\n shipping_address=None,\n billing_address=None,\n order_number=None,\n status=None,\n request=None,\n **kwargs\n ):\n # Make sure basket isn't empty\n if basket.is_empty:\n # Translators: User facing error message in checkout\n raise ValueError(_(\"Empty baskets cannot be submitted\"))\n\n # Allocate an order number\n if not order_number:\n OrderNumberGenerator = get_class(\"order.utils\", \"OrderNumberGenerator\")\n generator = OrderNumberGenerator()\n order_number = generator.order_number(basket)\n\n # Figure out what status the new order should be\n if not status and hasattr(settings, \"OSCAR_INITIAL_ORDER_STATUS\"):\n status = getattr(settings, \"OSCAR_INITIAL_ORDER_STATUS\")\n\n # Make sure there isn't already an order with this order number\n if Order._default_manager.filter(number=order_number).exists():\n # Translators: User facing error message in checkout\n raise ValueError(\n _(\"There is already an order with number %(order_number)s\")\n % dict(order_number=order_number)\n )\n\n # Open a transaction so that order creation is atomic.\n with transaction.atomic():\n # Create the actual order.Order and order.Line models\n order = self.create_order_model(\n user,\n basket,\n shipping_address,\n shipping_method,\n shipping_charge,\n billing_address,\n total,\n order_number,\n status,\n request=request,\n **kwargs\n )\n for line in basket.all_lines():\n self.create_line_models(order, line)\n self.update_stock_records(line)\n\n # Make sure all the vouchers in the order are active and can actually be used by the order placing user.\n voucher_user = request.user if request and request.user else user\n for voucher in basket.vouchers.select_for_update():\n available_to_user, msg = voucher.is_available_to_user(user=voucher_user)\n if not voucher.is_active() or not available_to_user:\n raise ValueError(msg)\n\n # Record any discounts associated with this order\n for application in basket.offer_applications:\n # Trigger any deferred benefits from offers and capture the resulting message\n application[\"message\"] = application[\"offer\"].apply_deferred_benefit(\n basket, order, application\n )\n # Record offer application results\n if application[\"result\"].affects_shipping:\n # Skip zero shipping discounts\n shipping_discount = shipping_method.discount(basket)\n if shipping_discount <= Decimal(\"0.00\"):\n continue\n # If a shipping offer, we need to grab the actual discount off\n # the shipping method instance, which should be wrapped in an\n # OfferDiscount instance.\n application[\"discount\"] = shipping_discount\n self.create_discount_model(order, application)\n self.record_discount(application)\n\n # Record voucher usage for this order\n for voucher in basket.vouchers.all():\n self.record_voucher_usage(order, voucher, user)\n\n # Send signal for analytics to pick up\n order_placed.send(sender=self, order=order, user=user)\n\n # Done! 
Return the order.Order model\n return order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def add_order(self, order):\n if order.is_bid:\n if order.price in self.buy_levels:\n limit = self.buy_levels[order.price]\n if limit.size == 0:\n self.buy_tree.size += 1\n limit.add(order)\n self.buy_map[order.uid] = order\n order.parent_limit = limit\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.buy_map[order.uid] = order\n self.buy_tree.insert(limit)\n self.buy_tree.size += 1\n self.buy_levels[order.price] = limit\n order.parent_limit = self.buy_levels[order.price]\n if self.highest_buy is None or order.price > self.highest_buy:\n self.highest_buy = order.price\n else:\n if order.price in self.sell_levels:\n limit = self.sell_levels[order.price]\n if limit.size == 0:\n self.sell_tree.size += 1\n limit.add(order)\n self.sell_map[order.uid] = order\n order.parent_limit = self.sell_levels[order.price]\n else:\n limit = Limit(order.price)\n limit.add(order)\n self.sell_map[order.uid] = order\n self.sell_tree.insert(limit)\n self.sell_tree.size += 1\n self.sell_levels[order.price] = limit\n order.parent_limit = self.sell_levels[order.price]\n if self.lowest_sell is None or order.price < self.lowest_sell:\n self.lowest_sell = order.price\n self.update_book()", "def submit_order(request, orderid):\n if request.user.is_staff:\n order = WorkingOrder.objects.get(pk=orderid)\n else:\n order = request.user.workingorder_set.get(id=orderid) \n\n if order.status != BaseOrder.Const.DEALER_EDIT:\n return HttpResponseServerError()\n \n # always submit orders in the context of proper account\n account = order.owner.get_profile().account\n \n if request.method == 'GET': \n form = SubmitForm(instance=order)\n else:\n form = SubmitForm(request.POST, instance=order)\n if form.is_valid():\n order = form.save(commit=False)\n cost = order.cost or decimal.Decimal() \n if cost > account.credit_balance:\n ## users account doesn't have enough juice.. 
send then to the ecom engine \n ## to pay, then get them back here ...\n order = form.save()\n products = [form.cleaned_data['design_product']]\n option = form.cleaned_data.get('processing_option', None)\n if option:\n products.append(option) \n new_cart(request, products)\n request.method = 'GET' \n return paypal_checkout(request, success_url=reverse('submit-order', args=[orderid]))\n else: \n register_design_order(order.owner, order.owner.get_profile().account, order, cost)\n order = form.save(commit=False)\n order.status = BaseOrder.Const.SUBMITTED\n order.submitted = datetime.now()\n order.save()\n # return HttpResponseRedirect('completed_order_summary', args=[orderid]) # TODO\n return HttpResponseRedirect(reverse('submit-order-completed', args=[order.id])) \n return dict(order=order, form=form)", "def on_order(self, order, lvl, bp):\n\t\tS = order.symbol\n\n\t\tneed_bp = order.quantity * self.ticks[S].close\n\t\tif need_bp <= bp: # have enough buying power to place order\n\t\t\tused_bp = need_bp\n\n\t\t\tif lvl == 'hard_stop':\n\t\t\t\tself.on_hard_stop(S)\n\t\t\telif lvl == 'rebalance':\n\t\t\t\tself.on_rebalance(S)\n\n\t\t\tself.pos[order.symbol].confirm_order(order)\n\t\t\tlogger.info(\n\t\t\t\t'Publish Order={} for Strategy={}'\n\t\t\t\t.format(order, self.id)\n\t\t\t)\n\t\t\tself.basic_publish('order', sender=self.id, order=order)\n\t\telse:\n\t\t\tused_bp = 0\n\t\treturn used_bp", "def show_completed(self, user):\n\n orders = CartProduct.show_completed(user)\n CartProductsView.show_completed(orders)", "def customer_session_started_handler(self, data: dict, **kwargs) -> None:\n restaurant = self.env.manager.restaurant.get(data['restaurant_id'])\n if restaurant.available():\n self.env.publish('order_placed', data={\n 'customer_id': self.id,\n 'restaurant_id': data['restaurant_id']})", "def slot_user_order(self, dummy_sender, data):\r\n (price, volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. 
Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)", "def click_placeorder(self):\n time.sleep(0.4)\n placeorder = self.driver.find_element_by_css_selector(self.CSS_PLACEORDER)\n action = TouchActions(self.driver)\n action.tap(placeorder).perform()\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#order-no')))\n order_id=self.driver.find_element_by_css_selector('#order-no').text\n return order_id", "def test_get_one_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.get_order(1), self.order)", "def m_ts_OrderAdded(self, sender, e):\r\n print(\"Order was added with price of {0}.\".format(e.Order.LimitPrice))", "def select_user_and_add_transaction(self):\n def add_transaction(to_user):\n print(\"Amount of transaction:\")\n amount = input()\n new_transaction = transaction.Transaction(amount)\n to_user.add_transaction(new_transaction)\n\n try:\n selected_user = self.prompt_user_selection()\n add_transaction(selected_user)\n except ValueError:\n print(\"No changes made.\")", "def update_orders(comp, order, user_correct, payment_id):\n users_orders = []\n for item in order.items.all():\n users_orders.append(item.id)\n item.is_paid = True\n item.save()\n 
order.related_competition = comp\n order.payment_id = payment_id\n order.order_date = timezone.now()\n order.answer_correct = user_correct\n order.ordered = True\n order.save()\n return order", "def set_order(self, order):\n self.order = order", "def set_order(self, order):\n self.order = order", "def place(self, order):\n assert isinstance(order, Order)\n order_id = self.__order_count\n self.__pending_orders[order_id] = order\n self.__order_count = order_id + 1\n return order_id", "def order(self, stock, amount):\n self.orders[stock] = amount", "def get_one_order():", "def test_process_order(self):\n checkout_form = self.get_checkout_form()\n checkout_form.submit()\n\n sleep(0.5)\n result = self.browser.find_element_by_id('result')\n self.assertIn(\n \"Your order was placed.\",\n result.text\n )\n self.assertTrue(len(self.client.session['cart']) == 0)\n OrderInfo.objects.get()", "def order_ready(request):\n\tcs , status = CookStatus.objects.get_or_create(cook_name=request.user)\n\tif cs.current_order is not None:\n\t\tcs.current_order.status = 'ready-to-serve'\n\t\tcs.current_order.save()\n\t\tcs.current_order = None\n\t\tcs.save()\n\n\treturn HttpResponseRedirect(\"/staff/cook_order_list/\")", "def prepare_order(request):\n distributer_id = request.GET.get('distributer')\n billing_address_id = request.GET.get('bill_addr')\n pickup_method = 2 # this is left in place if we ever decide to have door-to-door deliveries - otherwise it should be deleted\n cart = Cart.objects.get_or_create(user=request.user, processed_to_order=False)[0]\n user_bill_addr = UserBillingAddress.objects.get_or_create(pk=billing_address_id, user=request.user)[0]\n distributer = Distributer.objects.get(pk=distributer_id)\n\n # Create order\n order = Order()\n order.user = request.user\n order.distributer = distributer\n order.subtotal = cart.subtotal\n order.tax_total = cart.tax_total\n order.total = cart.total\n order.discount_for_returned_package = 0 #TODO implement returned packaging\n order.to_pay = 0 #TODO implement returned packaging\n order.delivery_method = pickup_method\n order.save()\n\n # create all order items\n for item in cart.cartitem_set.all():\n order_item = OrderItem()\n order_item.order = order\n order_item.item_name = str(item.item)\n order_item.item_price = item.item.price\n order_item.item_quantity = item.quantity\n order_item.item_decimal_quantity = 0 #TODO implement decimal quantity\n order_item.item_unit_of_measure = \"kom\" #TODO implement decimal quantity\n order_item.item_tax_bracket = item.item.tax_bracket\n order_item.item_subtotal = item.line_subtotal\n order_item.item_tax_total = item.line_tax_total\n order_item.item_total = item.line_tax_total\n if item.item.package_type == None:\n order_item.item_package = None\n order_item.item_package_price = 0\n else:\n order_item.item_package = item.item.package_type.type\n order_item.item_package_price = item.item.package_type.price\n order_item.item_package_subtotal = item.line_package_subtotal\n order_item.item_package_tax_total = item.line_package_tax_total\n order_item.item_package_total = item.line_package_total\n order_item.save()\n\n billing_address = OrderBillingAddress()\n billing_address.order = order\n billing_address.name = user_bill_addr.name\n billing_address.surname = user_bill_addr.surname\n billing_address.street_name = user_bill_addr.street_name\n billing_address.street_nr = user_bill_addr.street_nr\n billing_address.zip_code = user_bill_addr.zip_code\n billing_address.city = user_bill_addr.city\n billing_address.country = 
user_bill_addr.country\n billing_address.vat_nr = user_bill_addr.vat_nr\n billing_address.vat_taxpayer = user_bill_addr.vat_taxpayer\n billing_address.save()\n\n return redirect(reverse('orders_overview', kwargs={'pk': str(order.pk)}))", "def __orderAddItem(self, order, item):\n cursor = self.__db.cursor()\n iID = self.__id.getID(\"orderitem\")\n cursor.execute(\"INSERT INTO `orderItems` (`insertionID`, `orderID`, `itemID`) VALUES (%s, %s, %s);\",\n (iID, order, item))\n return iID", "def place_order(self, order_event):\n self._check_day_data(order_event.order_time)\n if order_event.order_type == 'MARKET':\n self._fill_market_order(order_event)\n elif order_event.order_type == 'LIMIT':\n if self._check_limit_order(order_event, order_event.order_time):\n pass\n self.resting_orders.append(order_event)", "def set_order_done():\n data = select_data_source()\n user = data['user']\n order_id = data['id']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if is valid.\n \n cmd = 'select passed from orders where id==\"{0}\"'.format(order_id)\n order_valid = db.execute(cmd).fetchall()[0][0]\n if order_valid == 0 :\n return finish_invalid_return\n \n ### Check if is done.\n cmd = 'select done from orders where id==\"{0}\"'.format(order_id)\n order_done = db.execute(cmd).fetchall()[0][0]\n if order_done != 0 :\n return finish_done_return\n \n ### All check done.\n ### Set it to done.\n cmd = 'update orders set done=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('user sets order {0} to be done.'.format(user))\n \n return finish_complete_return", "def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return 
self.render(request, 'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))", "def test_update_order_by_user(client):\n resp = put_json(client, '/v1/orders/1/', {\n \"status\": \"Complete\", },\n headers={'Authorization': 'Bearer ' + user_two(client)})\n assert resp.status_code == 401\n assert b'Not authorized' in resp.data", "def order(request):\n if request.method == 'GET':\n try:\n order = Order.objects.filter()\n serializer = OrderSerializer(order, many=True)\n except Order.DoesNotExist:\n message = 'An order does not exist in this ID({})!'.format(order)\n data = {'error': message}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n if request.method == 'POST':\n user = request.data['user']\n products = request.data['products']\n\n try:\n user_qry = User.objects.get(username=user)\n except User.DoesNotExist:\n message = 'An user does not exist in this name({})!'.format(user)\n data = {'error': message}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n try:\n total_amount = 0\n for prd in products:\n prd_qry = Product.objects.get(id=prd)\n total_amount += prd_qry.price\n except Product.DoesNotExist:\n message = 'An product does not exist in this ID({})!'.format(prd)\n data = {'error': message}\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n ordr = Order()\n ordr.user = user_qry\n ordr.total_amount = total_amount\n ordr.save()\n ordr.product.set(products)\n\n data = {'Success': 'Success'}\n return Response(data, status=status.HTTP_201_CREATED)", "def send_exchange_request(self, user):\n self.current['user'] = user\n self.send_request(user, self.KM_REQUEST_KEY)", "def mark_ready_for_review(self, user: User) -> None:\n from .exceptions import OperationForbiddenError, OrderEmptyError\n\n # If order is not in the \"CREATED\" state, raise an\n # OperationForbiddenError\n if not self.is_created:\n raise OperationForbiddenError(\n self.STATE_CHANGE_FORBIDDEN_ERROR_MSG % {\n 'current_state': Order.OrderState.get_choice_display(\n self.state\n ),\n 'new_state': Order.OrderState.PENDING.choice_display\n }\n )\n\n # If the order's item list is empty, raise an OrderEmptyError\n if not self.orderitem_set.exists():\n raise OrderEmptyError(\n self,\n 'An order should contain at least one Order item before it '\n 'can be marked as \"PENDING\".'\n )\n\n # Update the order to \"PENDING\" state\n self.update(user, state=Order.OrderState.PENDING.choice_value)", "def _send_to_market(self, order, is_mine):\n \n \n ord_type = order[self.col_idx['ordtype']]\n if ord_type == \"new\":\n self.mkt.send(is_buy=order[self.col_idx['is_buy']],\n qty=order[self.col_idx['qty']],\n price=order[self.col_idx['price']],\n uid=order[self.col_idx['uid']],\n is_mine=is_mine,\n timestamp=order[self.col_idx['timestamp']])\n elif ord_type == \"cancel\":\n self.mkt.cancel(uid=order[self.col_idx['uid']])\n elif ord_type == \"modif\":\n self.mkt.modif(uid=order[self.col_idx['uid']], \n new_qty=order[self.col_idx['qty']])\n else:\n raise ValueError(f'Unexpected ordtype: {ord_type}')", "def sender_order(self, sender_order):\n\n self._sender_order = sender_order", "def add_item(self, order_item):\n self.order_items.append(order_item)", "def store_order(self, store_name, store_orders,\\\n order_id, order_details):\n #store the details of the order in dictionary\n if order_details[\"ready\"] == True:\n print(\"order number {} is ready\".format(order_id))\n #save the information of all 
customers\n customers_data = self.firebase.get_data(\"customers\")\n message_information = {\n #store the current information\n 'order_id' : order_id,\n 'store_name' : store_name,\n 'store_orders' : store_orders,\n 'order_details' : order_details,\n #get the order details\n 'food' : order_details['food'],\n 'persons_id' : order_details['id'],\n }\n\n #add the persons details\n customer = customers_data[message_information['persons_id']]\n #print(customer)\n message_information['service'] = customer['service']\n message_information['mobile'] = customer['mobile']\n\n #If there is additional information, append it. Else, just put NA\n try:\n message_information['addit_info'] = order_details['addit_info']\n except:\n message_information['addit_info'] = 'NA'\n\n #Create the message\n self.create_message(message_information)\n\n else:\n # Update time_waited\n order_time = order_details[\"order_time\"]\n if order_time != \"None\":\n if order_details[\"ready\"]!=\"true\" or order_details[\"ready\"]!=\"waiting for collection\":\n time_elapsed = int(time.time()) - order_details[\"order_time\"]\n self.firebase.update([\"orders\", STORE_NAME, order_id], {\"time_waited\": time_elapsed})", "def send_admin_about_order(order):\n send_email('Flask ecommerce, New order', sender=current_app.config['ADMINS'][0],\n recipients=current_app.config['ADMINS'],\n text_body=render_template('email/new_order.txt', order=order),\n html_body=render_template('email/new_order.html', order=order))", "def test_buy_order(self):\n\n user = self.set_auth_token_header()\n\n # set account buying power\n account = user.account\n account.available_bp = 1000\n account.save()\n\n data = {\n 'stock': 'GOOG',\n 'quantity': 15,\n 'price': 1.25,\n 'order_type': 'BUY'\n }\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url, data=data)\n # order created\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # account balance should've been updated\n acc = Account.objects.get(user=user)\n self.assertEqual(acc.available_bp, 981.25)\n\n # stock shares should've been updated\n shares = StockShare.objects.get(account=acc)\n self.assertEqual(shares.quantity, 15.0)\n self.assertEqual(shares.total_value, 18.75)\n self.assertEqual(shares.stock.code, 'GOOG')", "def place_order(self, instrument, lower, upper, units=1, side_arg='buy'):\n if (side_arg == 'buy'):\n return self.oanda.create_order(self.account_id, instrument=instrument,\n units=units, side=side_arg,\n stopLoss=lower, takeProfit=upper,\n type='market')\n elif (side_arg == 'sell'):\n return self.oanda.create_order(self.account_id, instrument=instrument,\n units=units, side=side_arg,\n stopLoss=upper, takeProfit=lower,\n type='market')", "def orders(self, orders):\n\n self._orders = orders", "def orders(self, orders):\n\n self._orders = orders", "def resolve_order(info, id):\n order = get_node(info, id, only_type=Order)\n user = info.context.user\n if (order.user == user or user.get_all_permissions() & {\n 'order.view_order', 'order.edit_order'}):\n return order", "def click_place_order(self):\n WebDriverWait(self.driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, self.CSS_PLACEORDER)))\n placeorder = self.driver.find_element_by_css_selector(self.CSS_PLACEORDER)\n action = TouchActions(self.driver)\n action.tap(placeorder).perform()", "def generate_initial_order(self):\n self.ensure_one()\n # Add only active lines\n agreement_lines = self.mapped('agreement_line').filtered('active_chk')\n order = 
self.create_order(self.start_date, agreement_lines)\n self.write({'state': 'first'})\n order.signal_workflow('order_confirm')\n # Return view with order created\n return {\n 'domain': \"[('id', '=', %s)]\" % order.id,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'sale.order',\n 'context': self.env.context,\n 'res_id': order.id,\n 'view_id': [self.env.ref('sale.view_order_form').id],\n 'type': 'ir.actions.act_window',\n 'nodestroy': True\n }", "def buy(self, irc, msg, args, optlist, amount, thing, price, otherthing, notes):\n self.db.deleteExpired(self.registryValue('orderExpiry'))\n gpgauth = self._checkGPGAuth(irc, msg.prefix)\n if gpgauth is None:\n irc.error(\"For identification purposes, you must be identified via GPG \"\n \"to use the order book.\")\n return\n results = self.db.getByNick(gpgauth['nick'])\n if len(results) >= self.registryValue('maxUserOpenOrders'):\n irc.error(\"You may not have more than %s outstanding open orders.\" % \\\n self.registryValue('maxUserOpenOrders'))\n return\n extratime = 0\n if dict(optlist).has_key('long'):\n extratime = self.registryValue('longOrderDuration')\n trust = self._getTrust(irc, 'nanotube', gpgauth['nick'])\n sumtrust = sum([t for t,n in trust])\n if sumtrust < self.registryValue('minTrustForLongOrders'):\n irc.error(\"You must have a minimum of %s cumulative trust at \"\n \"level 1 and level 2 from nanotube to \"\n \"to place long orders.\" % (self.registryValue('minTrustForLongOrders'),))\n return\n orderid = self.db.buy(gpgauth['nick'], msg.host, amount, thing, price, otherthing, notes, extratime)\n irc.reply(\"Order id %s created.\" % (orderid,))\n if not world.testing:\n irc.queueMsg(ircmsgs.privmsg(\"#bitcoin-otc-ticker\",\n \"#%s || %s || BUY %s %s @ %s %s || %s\" % (orderid,\n gpgauth['nick'],\n amount,\n thing,\n self._getIndexedValue(price),\n otherthing,\n notes,)))", "def test_place_multiple_orders(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 3)\n self.assertEqual(self.orders_list.orders_list[2].order_id, 2)", "def PlaceOutgoingOrder(self, state):\r\n amountToOrder , policy_action = self.policy.calculate_order( state )\r\n\r\n self.outgoingOrdersQueue.PushEnvelope(amountToOrder)\r\n self.lastOrderQuantity = amountToOrder\r\n \r\n return policy_action", "def __order_status(self):\n log.debug(\"Displaying __order_status\")\n # Find the latest orders\n orders = self.session.query(db.Order) \\\n .filter(db.Order.user == self.user) \\\n .order_by(db.Order.creation_date.desc()) \\\n .limit(20) \\\n .all()\n # Ensure there is at least one order to display\n if len(orders) == 0:\n self.bot.send_message(self.chat.id, self.loc.get(\"error_no_orders\"))\n # Display the order status to the user\n for order in orders:\n self.bot.send_message(self.chat.id, order.text(w=self, session=self.session, user=True))\n # TODO: maybe add a page displayer instead of showing the latest 5 orders", "def email_order(request, order, user_correct):\n users_entries = get_entries(order)\n\n html_email = loader.render_to_string(\n 'email_templates/order_complete.html',\n {\n 'order': order,\n 'user': request.user.first_name,\n 'user_correct': user_correct,\n 'users_entries': users_entries\n }\n )\n message = strip_tags(html_email)\n send_mail(\n 'Your Order for Project Parts',\n message=message,\n from_email='noreply@projectparts.com',\n fail_silently=False,\n connection=None,\n 
recipient_list=[str(request.user.email)],\n html_message=html_email\n )", "def on_order(self, order: OrderData):\n pass\n\n # self.write_log(f\"on_order: status:{order.status}, orderid: {order.vt_orderid}, offset:{order.offset}, price:{order.price}, volume:{order.volume}, traded: {order.traded}\")\n # self.put_event()", "def orderInfo(self, orderInfo):\r\n\r\n self._orderInfo = orderInfo", "def place_order(self, order: Order) -> None:\n\n if order.id in [order.id for order in self.orders]:\n raise OrderAlreadyCreatedError(order)\n\n if not order.symbol.is_enabled:\n raise SymbolIsNotEnabledError(order.symbol)\n\n t = Thread(target=self.__place_order, args=(order,))\n t.start()\n\n self.__sort_orders_by_price()", "def place_order(cls, order: 'Order') -> 'Order':\n counter_order_type = OrderType.SELL.value if order.type == OrderType.BUY.value else OrderType.BUY.value\n counter_orders = None\n with transaction.atomic():\n if counter_order_type == OrderType.SELL.value:\n counter_orders = cls.objects.select_for_update().filter(\n type=counter_order_type,\n instrument=order.instrument,\n price__lte=order.price).order_by('price', 'created_at_dt')\n elif counter_order_type == OrderType.BUY.value:\n counter_orders = cls.objects.select_for_update().filter(\n type=counter_order_type,\n instrument=order.instrument,\n price__gte=order.price).order_by('-price', 'created_at_dt')\n if not counter_orders:\n # place order into the order book\n order.save()\n return order\n for counter_order in counter_orders:\n order, counter_order, *balances = cls._trade_orders(\n order, counter_order)\n order.save()\n counter_order.save()\n for balance in balances:\n balance.save()\n if order.status == OrderStatus.COMPLETED:\n return order\n return order", "def postOrder(self,Account=None,side='buy',price=0,amount=0,order=None):\n\t\tif order is not None:\n\t\t\tif order.Posted is True:\n\t\t\t\tprint(\"No order posted-order object might have been used\")\n\t\t\t\treturn -1\n\t\telse:\n\t\t\tif Account is None:\n\t\t\t\tprint(\"No order is posted-no account was provided\")\n\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\tprice=Decimal(price)\n\t\t\t\tamount=Decimal(amount)\n\t\t\t\torder=Order.Order(Account,side,price,amount)\n\t\tcharge=False\n\t\tif side is 'buy':\n\t\t\t charge=Account.withdraw(side,price*amount) \n\t\telse:\n\t\t\tcharge=Account.withdraw(side,amount)\n\t\tif charge is False:\n\t\t\treturn -1\n\t\telse:\n\t\t\tif self.register(order) is False:\n\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\torder.Posted=True\n\t\t\t\tprint(\"\\n\"+order.Side.upper()+\" order \"+str(order.ID)+\" from account \"+str(order.Account.ID)+\" is posted\\n\")\n\t\t\t\treturn order.ID", "def placeOrder(self, dish):\n # Get a random waiter\n waiter = choice(self.waiters)\n # Place the order, returning the result\n food = waiter.takeOrder(dish, self.chefs)\n return food", "def place_sell_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = request.form[\"trader_id\"]\n self.market.place_sell_order(trader_id, price, stocks)\n return \"\"", "def process_order(self, order_event : event.EventOrder) :\n pass", "def orderWatch(self, order):\r\n\t\tself.orders.append(order)", "def checkout(self, request, order):\n if not request.user.is_authenticated():\n if request.method == 'POST' and '_login' in request.POST:\n loginform = AuthenticationForm(data=request.POST, prefix='login')\n\n if loginform.is_valid():\n user = loginform.get_user()\n auth.login(request, user)\n\n order.user = user\n order.save()\n return 
HttpResponseRedirect('.')\n else:\n loginform = AuthenticationForm(prefix='login')\n else:\n loginform = None\n\n if order.status < order.CHECKOUT:\n order.update_status(order.CHECKOUT, 'Checkout process started')\n\n OrderForm = self.checkout_form(request, order)\n\n\n\n orderform_kwargs = {\n 'prefix': 'order',\n 'instance': order,\n 'request': request,\n 'shop': self,\n }\n\n if request.method == 'POST' and '_checkout' in request.POST:\n orderform = OrderForm(request.POST, **orderform_kwargs)\n\n if orderform.is_valid():\n notes = request.POST.get('order-notes')\n same_as_billing = request.POST.get('order-shipping_same_as_billing')\n delivery_address = request.POST.get('order-shipping_address')\n billing_address = request.POST.get('order-billing_address')\n delivery_address2 = request.POST.get('order-shipping_address2')\n billing_address2 = request.POST.get('order-billing_address2')\n delivery_date = request.POST.get('order-shipping_date')\n delivery_state = request.POST.get('order-shipping_state')\n billing_state = request.POST.get('order-billing_state')\n delivery_city = request.POST.get('order-shipping_city')\n billing_city = request.POST.get('order-billing_city')\n delivery_zip_code = request.POST.get('order-shipping_zip_code')\n billing_zip_code = request.POST.get('order-billing_zip_code')\n salutation = request.POST.get('order-billing_salutation')\n billing_country = request.POST.get('order-billing_country')\n delivery_country = request.POST.get('order-shipping_country')\n billing_first_name = request.POST.get('order-billing_first_name')\n billing_last_name = request.POST.get('order-billing_last_name')\n email = request.POST.get('order-email')\n billing_contact_number = request.POST.get('order-billing_contact_number')\n\n if same_as_billing:\n billing_address = delivery_address\n billing_address2 = delivery_address2\n billing_state = delivery_state\n billing_city = delivery_city\n billing_zip_code = delivery_zip_code\n billing_country = delivery_country\n\n \n # request.session['order-payment_method'] = request.POST.get('order-payment_method','')\n\n custom_data = {}\n custom_data['order_payment_method'] = request.POST.get('order-payment_method','')\n custom_data['order_notes'] = request.POST.get('order-notes','')\n custom_data['delivery_address2'] = delivery_address2\n custom_data['billing_address2'] = billing_address2\n custom_data['delivery_date'] = delivery_date\n custom_data['delivery_state'] = delivery_state\n custom_data['billing_state'] = billing_state\n custom_data['salutation'] = salutation\n custom_data['billing_country'] = billing_country\n custom_data['shipping_country'] = delivery_country\n custom_data['billing_contact_number'] = billing_contact_number\n\n try:\n order_data = OrderData.objects.get(order=order)\n except:\n order_data = OrderData()\n\n order_data.order = order\n order_data.data = simplejson.dumps(custom_data)\n order_data.save()\n \n \n orderform.save(\n notes=notes, \n billing_salutation=salutation,\n same_as_billing=True if same_as_billing else False,\n shipping_address=delivery_address,\n billing_address=billing_address,\n shipping_address2=delivery_address2,\n billing_address2=billing_address2,\n shipping_state=delivery_state,\n billing_state=billing_state,\n shipping_city=delivery_city,\n billing_city=billing_city,\n shipping_zip_code=delivery_zip_code,\n billing_zip_code=billing_zip_code,\n billing_country=billing_country,\n shipping_country=delivery_country,\n billing_first_name=billing_first_name,\n billing_last_name=billing_last_name,\n 
email=email\n )\n\n \"\"\"\n added notes\n \"\"\"\n return redirect('plata_shop_discounts')\n else:\n\n orderform = OrderForm(**orderform_kwargs)\n \n\n return self.render_checkout(request, {\n 'order': order,\n 'loginform': loginform,\n 'orderform': orderform,\n 'progress': 'checkout'\n })", "def handle_add(self, controller):\n \n try:\n pizza = controller.customer.pizza ## get a reference to pizza object of the customer\n \n except Exception:\n showinfo(title='Pop-up', message=\"No Pizza Created Yet.\")\n return\n \n else:\n # create an order if not exist, and add pizza to order\n c = controller.customer\n self.onPress(c) ## update requested data\n if not c.my_order:\n c.my_order = Order(c.name, c.address, c.id)\n \n c.AddToOrder()\n controller.show_frame(PageTwo) ## go to my order page" ]
[ "0.70376885", "0.6521009", "0.6342922", "0.6326408", "0.6183782", "0.6176169", "0.6154756", "0.606741", "0.60295796", "0.60066897", "0.60049874", "0.60049874", "0.60049874", "0.5994649", "0.59906405", "0.59855217", "0.59850776", "0.5975906", "0.5965155", "0.59345704", "0.5929625", "0.5907162", "0.5907162", "0.5905161", "0.58921456", "0.5866677", "0.5862132", "0.5844496", "0.583562", "0.5810452", "0.58034086", "0.5779227", "0.57685053", "0.5743363", "0.5733049", "0.5710249", "0.5695395", "0.5684049", "0.56558686", "0.5652283", "0.5621422", "0.5611015", "0.5606663", "0.5606181", "0.5606181", "0.5606181", "0.55999094", "0.5597637", "0.5592626", "0.5590204", "0.55867535", "0.55819225", "0.55394465", "0.5532854", "0.5505644", "0.54959375", "0.5492122", "0.5485944", "0.5485944", "0.54854137", "0.54841906", "0.5476378", "0.54679024", "0.5465464", "0.54595536", "0.5459027", "0.5457127", "0.5448155", "0.54384154", "0.5419563", "0.540762", "0.54004633", "0.5398062", "0.5396676", "0.5393668", "0.5375718", "0.5373647", "0.53701824", "0.5357809", "0.5356902", "0.5352528", "0.5352528", "0.53445554", "0.5343643", "0.53397244", "0.53361034", "0.5334434", "0.53317523", "0.5321319", "0.5321198", "0.5319687", "0.53164184", "0.5312923", "0.5291468", "0.52885276", "0.52854323", "0.52773535", "0.5269923", "0.5269826", "0.52668464", "0.5265486" ]
0.0
-1
Given a ManagedProcess protobuf instance, create the corresponding ProcessNode and insert it into the graph.
def add_node(self, managed_process_pb): node = ProcessNode(managed_process_pb) if node.name not in self.nodes: self.nodes[node.name] = node self.logger.info('Created node for [{}]'.format(node.name)) else: self.logger.error( 'Detected request to add a managed process using the name [{}] which is already taken'.format( node.name)) raise DuplicateManagedProcessName( 'Cannot have more than one managed process with name [{}]'.format(node.name)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_node_process(name, properties):\n process = NodeProcess(name)\n DB.session.add(process)\n DB.session.commit()\n for p in properties:\n prop = NodeProcessProperty(process.id, p[0], p[1], p[2])\n DB.session.add(prop)\n DB.session.commit()\n return process", "def add_process(self, type, name=None, config=None):\n assert name is not None, 'must specify name for now'\n node = Process(type=type, name=name, config=config)\n self.procs[name] = node\n return node", "def insert_process_after(self, process, uuid,\n comment='', force_refresh=True):\n parent = self.get_node(uuid)\n children = list(parent.get_children())\n node = ProcessNode.objects.create(process=process, piece=parent.piece,\n comment=comment, parent_id=parent.id)\n for child in children:\n child.parent = node\n child.save()\n\n if force_refresh:\n self.refresh_tree()\n return node", "def add_process(self):\n process_id = str(self.processBox.currentText())\n\n arguments = {}\n\n for row in range(0, self.processTableWidget.rowCount()):\n p_id = \"\"\n val = None\n\n if self.processTableWidget.item(row, 0):\n p_id = self.processTableWidget.item(row, 0).text()\n if self.processTableWidget.item(row, 2):\n val = self.processTableWidget.item(row, 2).text()\n if len(val) > 0:\n try:\n val = json.loads(val)\n except json.JSONDecodeError:\n pass\n else:\n val = None\n if p_id != \"\":\n if val:\n arguments[p_id] = val\n\n self.processgraph = self.processgraph.add_process(process_id, arguments)\n # Refresh process graph in GUI\n self.reload_processgraph_view()", "def insert_process_before(self, process, uuid,\n comment='', force_refresh=True):\n target = self.get_node(uuid)\n if target == self.root_node:\n raise Exception('Error: Cannot insert before the root node.')\n\n parent = target.parent\n children = list(target.get_siblings(include_self=True))\n node = ProcessNode.objects.create(process=process, piece=parent.piece,\n comment=comment, parent_id=parent.id)\n for child in children:\n child.parent = node\n child.save()\n\n if force_refresh:\n self.refresh_tree()\n return node", "def add_process(self, process):\n self.processes[process.namespec()] = process", "def process(cls, process_id: str, arguments: dict = None, namespace: Union[str, None] = None, **kwargs):\n arguments = {**(arguments or {}), **kwargs}\n for arg, value in arguments.items():\n if isinstance(value, ProcessBuilderBase):\n arguments[arg] = value.pgnode\n elif isinstance(value,list):\n for index,arrayelement in enumerate(value):\n if(isinstance(arrayelement,ProcessBuilderBase)):\n value[index] = arrayelement.pgnode\n\n for arg in [a for a, v in arguments.items() if v is UNSET]:\n del arguments[arg]\n return cls(PGNode(process_id=process_id, arguments=arguments, namespace=namespace))", "def graph_add_process(self, process_id, args) -> 'ImageCollection':\n graph = {\n 'process_id': process_id,\n 'args': args\n }\n\n return RESTProcesses(graph, self.connection)", "def _RegisterProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n if process.pid in self._processes_per_pid:\n raise KeyError(\n 'Already managing process: {0!s} (PID: {1:d})'.format(\n process.name, process.pid))\n\n self._processes_per_pid[process.pid] = process", "def addProcessObject(self, process, env, uid=None, gid=None):\n name = process.getName()\n self.processes[name] = (process, env, uid, gid)\n self.delay[name] = self.minRestartDelay\n if self.running:\n self.startProcess(name)", "def run_process(self, process, piece='a', number=1, comment='', 
force_refresh=True):\n branch = self.get_piece(piece)\n node = self._insert_node(process, piece, number, branch, comment)\n if force_refresh: # workaround to force the root node to update\n self.refresh_tree()\n return node", "def custom_process_from_process_graph(\n process_spec: Union[dict, Path],\n process_registry: ProcessRegistry = process_registry_100,\n namespace: str = DEFAULT_NAMESPACE\n):\n # TODO: option to hide process graph for (public) listing\n if isinstance(process_spec, Path):\n process_spec = load_json(process_spec)\n process_id = process_spec[\"id\"]\n process_function = _process_function_from_process_graph(process_spec)\n process_registry.add_function(process_function, name=process_id, spec=process_spec, namespace=namespace)", "def _create_process_instance(self, process_id, name, module, cls, config, proc_attr):\n # SERVICE INSTANCE.\n process_instance = for_name(module, cls)\n if not isinstance(process_instance, BaseService):\n raise ContainerConfigError(\"Instantiated service not a BaseService %r\" % process_instance)\n\n # Prepare service instance\n process_instance.errcause = \"\"\n process_instance.id = process_id\n process_instance.container = self.container\n process_instance.CFG = config\n process_instance._proc_name = name\n process_instance._proc_start_time = time.time()\n for att, att_val in proc_attr.iteritems():\n setattr(process_instance, att, att_val)\n\n #Unless the process has been started as part of another Org, default to the container Org or the ION Org\n if config.has_key('org_governance_name'):\n process_instance.org_governance_name = config['org_governance_name']\n else:\n process_instance.org_governance_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))\n\n\n # Add stateful process operations\n if hasattr(process_instance, \"_flush_state\"):\n def _flush_state():\n with process_instance._state_lock:\n state_obj = process_instance.container.state_repository.put_state(process_instance.id, process_instance._proc_state,\n state_obj=process_instance._proc_state_obj)\n state_obj.state = None # Make sure memory footprint is low for larger states\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n\n def _load_state():\n if not hasattr(process_instance, \"_proc_state\"):\n process_instance._proc_state = {}\n try:\n with process_instance._state_lock:\n new_state, state_obj = process_instance.container.state_repository.get_state(process_instance.id)\n process_instance._proc_state.clear()\n process_instance._proc_state.update(new_state)\n process_instance._proc_state_obj = state_obj\n process_instance._proc_state_changed = False\n except NotFound as nf:\n log.debug(\"No persisted state available for process %s\", process_instance.id)\n except Exception as ex:\n log.warn(\"Process %s load state failed: %s\", process_instance.id, str(ex))\n process_instance._flush_state = _flush_state\n process_instance._load_state = _load_state\n process_instance._state_lock = RLock()\n process_instance._proc_state = {}\n process_instance._proc_state_obj = None\n process_instance._proc_state_changed = False\n\n # PROCESS RESTART: Need to check whether this process had persisted state.\n # Note: This could happen anytime during a system run, not just on RESTART boot\n log.debug(\"Loading persisted state for process %s\", process_id)\n process_instance._load_state()\n\n # start service dependencies (RPC clients)\n self._start_process_dependencies(process_instance)\n\n return process_instance", "def 
create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def new_process() -> Process:\n return multiprocessing.Process()", "def alloc_proc(self, process, delta_t):\n\t\tself._process_list.append(process)", "def create_node_type(name, processes):\n node_type = NodeType(name)\n node_type.processes = processes\n DB.session.add(node_type)\n DB.session.commit()\n return node_type", "def create_process(\n self,\n model,\n client_plans,\n client_config,\n server_config,\n server_averaging_plan,\n client_protocols=None,\n ):\n process = FLProcess(\n model=model,\n client_plans=client_plans,\n client_config=client_config,\n server_config=server_config,\n client_protocols=client_protocols,\n server_averaging_plan=server_averaging_plan,\n )\n\n self.processes[process.id] = process\n return self.processes[process.id]", "def test_addProcess(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.protocols, {})\r\n self.assertEqual(self.pm.processes,\r\n {\"foo\": ([\"arg1\", \"arg2\"], 1, 2, {})})\r\n self.pm.startService()\r\n self.reactor.advance(0)\r\n self.assertEqual(self.pm.protocols.keys(), [\"foo\"])", "def add_task(bpmn, counts, label):\r\n from pm4py.objects.bpmn.bpmn_graph import BPMN\r\n task = BPMN.Task(name=label)\r\n bpmn.add_node(task)\r\n return bpmn, task, counts", "def addProcess(self, name, args, uid=None, gid=None, env={}):\n class SimpleProcessObject(object):\n\n def starting(self):\n pass\n\n def stopped(self):\n pass\n\n def getName(self):\n return name\n\n def getCommandLine(self):\n return args\n\n def getFileDescriptors(self):\n return []\n\n self.addProcessObject(SimpleProcessObject(), env, uid, gid)", "def _register_process(self, process_instance, name):\n # Add process instance to container's process dict\n if name in self.procs_by_name:\n log.warn(\"Process name already registered in container: %s\" % name)\n self.procs_by_name[name] = process_instance\n self.procs[process_instance.id] = process_instance\n\n # Add Process to resource registry\n # Note: In general the Process resource should be created by the CEI PD, but not all processes are CEI\n # processes. 
How to deal with this?\n process_instance.errcause = \"registering\"\n\n if process_instance._proc_type != IMMEDIATE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n proc_obj = Process(name=process_instance.id, label=name, proctype=process_instance._proc_type)\n proc_id, _ = self.container.resource_registry.create(proc_obj)\n process_instance._proc_res_id = proc_id\n\n # Associate process with container resource\n self.container.resource_registry.create_association(self.cc_id, \"hasProcess\", proc_id)\n else:\n process_instance._proc_res_id = None\n\n # Process type specific registration\n # TODO: Factor out into type specific handler functions\n if process_instance._proc_type == SERVICE_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n # Registration of SERVICE process: in resource registry\n service_list, _ = self.container.resource_registry.find_resources(restype=\"Service\", name=process_instance.name, id_only=True)\n if service_list:\n process_instance._proc_svc_id = service_list[0]\n if len(service_list) > 1:\n log.warn(\"More than 1 Service resource found with name %s: %s\", process_instance.name, service_list)\n else:\n # We are starting the first process of a service instance\n # TODO: This should be created by the HA Service agent in the future\n svc_obj = Service(name=process_instance.name, exchange_name=process_instance._proc_listen_name, state=ServiceStateEnum.READY)\n process_instance._proc_svc_id, _ = self.container.resource_registry.create(svc_obj)\n\n # Create association to service definition resource\n svcdef_list, _ = self.container.resource_registry.find_resources(restype=\"ServiceDefinition\",\n name=process_instance.name, id_only=True)\n if svcdef_list:\n if len(svcdef_list) > 1:\n log.warn(\"More than 1 ServiceDefinition resource found with name %s: %s\", process_instance.name, svcdef_list)\n self.container.resource_registry.create_association(process_instance._proc_svc_id,\n \"hasServiceDefinition\", svcdef_list[0])\n else:\n log.error(\"Cannot find ServiceDefinition resource for %s\", process_instance.name)\n\n self.container.resource_registry.create_association(process_instance._proc_svc_id, \"hasProcess\", proc_id)\n\n elif process_instance._proc_type == AGENT_PROCESS_TYPE:\n if self.container.has_capability(self.container.CCAP.DIRECTORY):\n # Registration of AGENT process: in Directory\n caps = process_instance.get_capabilities()\n self.container.directory.register(\"/Agents\", process_instance.id,\n **dict(name=process_instance._proc_name,\n container=process_instance.container.id,\n resource_id=process_instance.resource_id,\n agent_id=process_instance.agent_id,\n def_id=process_instance.agent_def_id,\n capabilities=caps))\n\n self._call_proc_state_changed(process_instance, ProcessStateEnum.RUNNING)", "def spawnProcess(self, processProtocol, executable, args=(), env={},\r\n path=None, uid=None, gid=None, usePTY=0,\r\n childFDs=None):\r\n\r\n proc = DummyProcess(self, executable, args, env, path,\r\n processProtocol, uid, gid, usePTY, childFDs)\r\n processProtocol.makeConnection(proc)\r\n self.spawnedProcesses.append(proc)\r\n return proc", "def addProcess(self, name, proc_config):\n if self.processes.has_key(name):\n raise KeyError(\"remove %s first\" % name)\n p = self.engineProtocol()\n p.service = self\n p.name = name\n proc_config.processProtocol = p\n self.processes[name] = proc_config\n if self.running:\n self.startProcess(name)\n return p.deferred", "def add_node(self, 
ip, port, father_address):\n father = self.find_node(father_address)\n if father is not None:\n node = GraphNode((ip, port))\n node.alive = True\n node.set_parent(father)\n father.add_child(node)\n self.nodes.append(node)\n pass", "def graph_add_process(self, process_id, args) -> 'ImageCollection':\n graph = {\n 'process_id': process_id,\n\n }\n\n for key, value in args.items():\n graph[key] = value\n\n #graph = {\n # 'process_id': process_id,\n # 'args': args\n #}\n\n return RestImagery(graph, self.session)", "def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):\n if process_id and not is_valid_identifier(process_id, ws_sub='_'):\n raise BadRequest(\"Given process_id %s is not a valid identifier\" % process_id)\n\n # Generate a new process id if not provided\n # TODO: Ensure it is system-wide unique\n process_id = process_id or \"%s.%s\" % (self.container.id, self.proc_id_pool.get_id())\n log.debug(\"ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s\", name, module, cls, config, process_id)\n\n process_cfg = deepcopy(CFG)\n if config:\n # Use provided config. Must be dict or DotDict\n if not isinstance(config, DotDict):\n config = DotDict(config)\n if config.get_safe(\"process.config_ref\"):\n # Use a reference\n config_ref = config.get_safe(\"process.config_ref\")\n log.info(\"Enhancing new process spawn config from ref=%s\" % config_ref)\n matches = re.match(r'^([A-Za-z]+):([A-Za-z0-9_\\.]+)/(.*)$', config_ref)\n if matches:\n ref_type, ref_id, ref_ext = matches.groups()\n if ref_type == \"resources\":\n if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):\n try:\n obj = self.container.resource_registry.read(ref_id)\n if obj and hasattr(obj, ref_ext):\n ref_config = getattr(obj, ref_ext)\n if isinstance(ref_config, dict):\n dict_merge(process_cfg, ref_config, inplace=True)\n else:\n raise BadRequest(\"config_ref %s exists but not dict\" % config_ref)\n else:\n raise BadRequest(\"config_ref %s - attribute not found\" % config_ref)\n except NotFound as nf:\n log.warn(\"config_ref %s - object not found\" % config_ref)\n raise\n else:\n log.error(\"Container missing RESOURCE_REGISTRY capability to resolve process config ref %s\" % config_ref)\n elif ref_type == \"objects\":\n if self.container.has_capability(self.container.CCAP.OBJECT_STORE):\n try:\n obj = self.container.object_store.read_doc(ref_id)\n ref_config = obj\n if ref_ext:\n ref_config = get_safe(obj, ref_ext, None)\n if ref_config is None:\n raise BadRequest(\"config_ref %s - attribute not found\" % config_ref)\n\n if isinstance(ref_config, dict):\n dict_merge(process_cfg, ref_config, inplace=True)\n else:\n raise BadRequest(\"config_ref %s exists but not dict\" % config_ref)\n except NotFound as nf:\n log.warn(\"config_ref %s - object not found\" % config_ref)\n raise\n else:\n log.error(\"Container missing OBJECT_STORE capability to resolve process config ref %s\" % config_ref)\n else:\n raise BadRequest(\"Unknown reference type in: %s\" % config_ref)\n\n dict_merge(process_cfg, config, inplace=True)\n if self.container.spawn_args:\n # Override config with spawn args\n dict_merge(process_cfg, self.container.spawn_args, inplace=True)\n\n #log.debug(\"spawn_process() pid=%s process_cfg=%s\", process_id, process_cfg)\n\n # PROCESS TYPE. 
Determines basic process context (messaging, service interface)\n # One of the constants defined at the top of this file\n\n service_cls = named_any(\"%s.%s\" % (module, cls))\n process_type = get_safe(process_cfg, \"process.type\") or getattr(service_cls, \"process_type\", \"service\")\n\n process_start_mode = get_safe(config, \"process.start_mode\")\n\n process_instance = None\n\n # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)\n self._call_proc_state_changed(\"%s.%s\" % (module, cls), ProcessStateEnum.PENDING)\n\n try:\n # Additional attributes to set with the process instance\n proc_attr = {\"_proc_type\": process_type,\n \"_proc_spawn_cfg\": config\n }\n\n # spawn process by type\n if process_type == SERVICE_PROCESS_TYPE:\n process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == STREAM_PROCESS_TYPE:\n process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == AGENT_PROCESS_TYPE:\n process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == STANDALONE_PROCESS_TYPE:\n process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == IMMEDIATE_PROCESS_TYPE:\n process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n elif process_type == SIMPLE_PROCESS_TYPE:\n process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg, proc_attr)\n\n else:\n raise BadRequest(\"Unknown process type: %s\" % process_type)\n\n self._register_process(process_instance, name)\n\n process_instance.errcause = \"OK\"\n log.info(\"ProcManager.spawn_process: %s.%s -> pid=%s OK\", module, cls, process_id)\n\n if process_type == IMMEDIATE_PROCESS_TYPE:\n log.info('Terminating immediate process: %s', process_instance.id)\n self.terminate_process(process_instance.id)\n\n # terminate process also triggers TERMINATING/TERMINATED\n self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)\n\n else:\n #Update local policies for the new process\n if self.container.has_capability(self.container.CCAP.GOVERNANCE_CONTROLLER):\n self.container.governance_controller.update_container_policies(process_instance, safe_mode=True)\n\n return process_instance.id\n\n except IonProcessError:\n errcause = process_instance.errcause if process_instance else \"instantiating process\"\n log.exception(\"Error spawning %s %s process (process_id: %s): %s\", name, process_type, process_id, errcause)\n return None\n\n except Exception:\n errcause = process_instance.errcause if process_instance else \"instantiating process\"\n log.exception(\"Error spawning %s %s process (process_id: %s): %s\", name, process_type, process_id, errcause)\n\n # trigger failed notification - catches problems in init/start\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n\n raise", "def add_process(self, model_id, n_cores, n_time, s_time):\n p = Process(n_cores=n_cores, time_needed=n_time, model_id=model_id, start_time=s_time)\n self.process_list.append(p)", "def createNode(self, pkg, exe, args, name, nspace):\r\n node = Node(self)\r\n self.callRemote('createNode', pkg, exe, args, name,\r\n nspace).chainDeferred(node)\r\n return node", "def _create(self, context, values):\n # initialize load stats from existing 
instances:\n compute_node = db.compute_node_create(context, values)\n return compute_node", "def _spawn_simple_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_simple_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n return process_instance", "def remote_createNode(self, pkg, exe, args, name, namespace):\r\n return Node(self, pkg, exe, args, name, namespace)", "def describe_process(self, process):\n description = self.client.describeprocess(process.identifier)\n\n new_process = Process.from_owslib(description)\n\n new_process.inputs = process.inputs\n\n new_process.domain = process.domain\n\n new_process.parameters = process.parameters\n\n return new_process", "def insert_workflow_module(self, project_id, branch_id, before_module_id, command):\n with self.backend.lock:\n # Get the handle for the specified branch and the branch head\n branch = self.projects.get_branch(project_id=project_id, branch_id=branch_id)\n if branch is None:\n return None\n head = branch.get_head()\n if head is None or len(head.modules) == 0:\n return None\n\n # Get the index of the module at which the new module is inserted\n module_index = None\n modules = head.modules\n for i in range(len(modules)):\n if modules[i].identifier == before_module_id:\n module_index = i\n break\n if module_index is None:\n return None\n\n # Get handle for the inserted module\n context = compute_context(modules[0:module_index])\n # Create handle for the inserted module. 
The state of the module\n # depends on the state of the backend.\n if head.is_active:\n state = mstate.MODULE_PENDING\n else:\n state = self.backend.next_task_state()\n inserted_module = ModuleHandle(\n command=command,\n state=state,\n external_form=command.to_external_form(\n command=self.packages[command.package_id].get(command.command_id),\n datasets=[ context[name] for name in context if context[name].is_dataset ]\n ),\n provenance=ModuleProvenance(unexecuted=True)\n )\n # Create list of pending modules for the new workflow.\n pending_modules = [inserted_module]\n for m in modules[module_index:]:\n pending_modules.append(\n ModuleHandle(\n command=m.command,\n external_form=m.external_form,\n outputs=m.outputs,\n provenance=m.provenance\n )\n )\n workflow = branch.append_workflow(\n modules=modules[:module_index],\n action=wf.ACTION_INSERT,\n command=inserted_module.command,\n pending_modules=pending_modules\n )\n if not head.is_active:\n self.execute_module(\n project_id=project_id,\n branch_id=branch_id,\n module=workflow.modules[module_index],\n artifacts=context,\n )\n return workflow.modules[module_index:]", "def _spawn_standalone_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # Add publishers if any...\n publish_streams = get_safe(config, \"process.publish_streams\")\n pub_names = self._set_publisher_endpoints(process_instance, publish_streams)\n\n # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)\n def cleanup(*args):\n self._cleanup_method(process_instance.id, rsvc)\n for name in pub_names:\n p = getattr(process_instance, name)\n p.close()\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_standalone_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def create_process(args):\n process = args.process\n\n # create list of valid machines\n valid_machines = []\n valid_types = [cmpy.machines.MealyHMM, \n cmpy.machines.RecurrentEpsilonMachine]\n\n for em in dir(cmpy.machines):\n if em[0].isupper():\n try:\n m_str = 'cmpy.machines.' + em +'()' \n eval(m_str)\n mtype = type(eval(m_str))\n if mtype in valid_types:\n valid_machines.append(em)\n except:\n pass\n\n # remove MealyHMM, RecurrentEpsilonMachine\n valid_machines.remove('MealyHMM')\n valid_machines.remove('RecurrentEpsilonMachine')\n\n # if in valid_machine, try to create instance\n if process in valid_machines:\n eM = eval('cmpy.machines.' + process + '()')\n else: \n error_msg = (\"\\n\\nProcess {} not valid. 
Try:\\n\\n{}\\n\".format(process,\n valid_machines))\n raise ProcessException(error_msg)\n\n return eM", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def create_node(self, address, service):\n node = create_node(address, service)\n node.id = address + \"_\" + service\n return node", "def register_proc(self, pid: int):\n self.processes.add(pid)", "def _spawn_agent_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n if not isinstance(process_instance, ResourceAgent) and not isinstance(process_instance, SimpleResourceAgent):\n raise ContainerConfigError(\"Agent process must extend ResourceAgent\")\n listeners = []\n\n # Set the resource ID if we get it through the config\n resource_id = get_safe(process_instance.CFG, \"agent.resource_id\")\n if resource_id:\n process_instance.resource_id = resource_id\n\n alistener = self._create_listening_endpoint(node=self.container.node,\n from_name=resource_id,\n process=process_instance)\n\n listeners.append(alistener)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n listeners.append(rsvc)\n\n # cleanup method to delete process/agent queue (@TODO: leaks a bit here - should use XOs)\n def agent_cleanup(x):\n self._cleanup_method(process_instance.id, rsvc)\n if resource_id:\n self._cleanup_method(resource_id, alistener)\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=listeners,\n proc_name=process_instance._proc_name,\n cleanup_method=agent_cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_agent_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n # Now call the on_init of the agent.\n self._process_init(process_instance)\n\n if not process_instance.resource_id:\n log.warn(\"New agent pid=%s has no resource_id set\" % process_id)\n\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n if not process_instance.resource_id:\n log.warn(\"Agent process id=%s does not define resource_id!!\" % process_instance.id)\n\n return process_instance", "def register_process(self, data_process_id=''):\n\n # retrieve the data_process object\n data_process_obj = self.clients.resource_registry.read(data_process_id)\n if data_process_obj is None:\n raise NotFound(\"Data Process %s does not exist\" % data_process_id)\n\n #create data producer resource and associate to this data_process_id\n data_producer_obj = IonObject(RT.DataProducer,name=data_process_obj.name, description=\"primary producer resource for this process\", is_primary=True)\n data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)\n\n # Create association\n self.clients.resource_registry.create_association(data_process_id, PRED.hasDataProducer, data_producer_id)\n\n # TODO: Walk up the assocations to find parent producers:\n # proc->subscription->stream->prod\n\n return data_producer_id", "def create_node_handle(node_name, node_type_name, node_meta_type):\n user = get_user()\n node_type = 
get_node_type(node_type_name)\n node_handle = NodeHandle.objects.create(node_name=node_name, node_type=node_type, node_meta_type=node_meta_type,\n creator=user, modifier=user)\n node_handle.save()\n activitylog.create_node(user, node_handle)\n return node_handle", "def _add_node(self, node_name, node_type):\n q = 'MATCH (r:' + node_type + ') WHERE r.name=\"' \\\n + node_name + '\" RETURN r'\n results = self.db.query(q, returns=(client.Node, str, client.Node))\n res = self.db.labels.create(node_type)\n\n if (len(results) == 0):\n r = self.db.nodes.create(name=node_name)\n res.add(r)\n else:\n r = results[0][0]\n return r", "def create_node(self, **kwargs):\n if not self.nodes:\n self.get_nodes()\n\n _node = Node(project_id=self.project_id, connector=self.connector, **kwargs)\n\n _node.create()\n self.nodes.append(_node)\n print(\n f\"Created: {_node.name} -- Type: {_node.node_type} -- \"\n f\"Console: {_node.console}\"\n )", "def _StartMonitoringProcess(self, process):\n if process is None:\n raise ValueError('Missing process.')\n\n pid = process.pid\n\n if pid in self._process_information_per_pid:\n raise KeyError(\n 'Already monitoring process (PID: {0:d}).'.format(pid))\n\n if pid in self._rpc_clients_per_pid:\n raise KeyError(\n 'RPC client (PID: {0:d}) already exists'.format(pid))\n\n rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()\n\n # Make sure that a worker process has started its RPC server.\n # The RPC port will be 0 if no server is available.\n rpc_port = process.rpc_port.value\n time_waited_for_process = 0.0\n while not rpc_port:\n time.sleep(0.1)\n rpc_port = process.rpc_port.value\n time_waited_for_process += 0.1\n\n if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:\n raise IOError(\n 'RPC client unable to determine server (PID: {0:d}) port.'.format(\n pid))\n\n hostname = 'localhost'\n\n if not rpc_client.Open(hostname, rpc_port):\n raise IOError((\n 'RPC client unable to connect to server (PID: {0:d}) '\n 'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))\n\n self._rpc_clients_per_pid[pid] = rpc_client\n self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)", "def create_node(self, **kwargs):\n size = kwargs['size'].ram\n params = {\n 'cmd' : 'dreamhost_ps-add_ps',\n 'movedata' : kwargs.get('movedata', 'no'),\n 'type' : kwargs['image'].name,\n 'size' : size\n }\n data = self.connection.request('/', params).object\n return Node(\n id = data['added_web'],\n name = data['added_web'],\n state = NodeState.PENDING,\n public_ip = [],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'type' : kwargs['image'].name\n }\n )", "def _instantiate_processes(self, input=None, context=None):\n# FIX: ALLOW Projections (??ProjectionTiming TUPLES) TO BE INTERPOSED BETWEEN MECHANISMS IN PATHWAY\n# FIX: AUGMENT LinearMatrix TO USE FULL_CONNECTIVITY_MATRIX IF len(sender) != len(receiver)\n\n # # MODIFIED 2/8/17 OLD: [SEE BELOW]\n # self.variable = []\n # MODIFIED 2/8/17 END\n self.mechanismsDict = {}\n self._all_mech_tuples = []\n self._allMechanisms = MechanismList(self, self._all_mech_tuples)\n\n # Get list of processes specified in arg to init, possibly appended by EVCMechanism (with prediction processes)\n processes_spec = self.processes\n\n # Assign default Process if PROCESS is empty, or invalid\n if not processes_spec:\n from PsyNeuLink.Components.Process import Process_Base\n processes_spec.append(ProcessTuple(Process_Base(), None))\n\n # If input to system is specified, number of items must equal number of processes with origin 
mechanisms\n if input is not None and len(input) != len(self.originMechanisms):\n raise SystemError(\"Number of items in input ({}) must equal number of processes ({}) in {} \".\n format(len(input), len(self.originMechanisms),self.name))\n\n #region VALIDATE EACH ENTRY, STANDARDIZE FORMAT AND INSTANTIATE PROCESS\n\n # Convert all entries to (process, input) tuples, with None as filler for absent input\n input_index = input_index_curr = 0\n for i in range(len(processes_spec)):\n\n # MODIFIED 2/8/17 NEW:\n # Get list of origin mechanisms for processes that have already been converted\n # (for use below in assigning input)\n orig_mechs_already_processed = list(p[0].originMechanisms[0] for\n p in processes_spec if isinstance(p,ProcessTuple))\n # MODIFIED 2/8/17 END\n\n # Entry is not a tuple\n # presumably it is a process spec, so enter it as first item of ProcessTuple\n if not isinstance(processes_spec[i], tuple):\n processes_spec[i] = ProcessTuple(processes_spec[i], None)\n\n # Entry is a tuple but not a ProcessTuple, so convert it\n if isinstance(processes_spec[i], tuple) and not isinstance(processes_spec[i], ProcessTuple):\n processes_spec[i] = ProcessTuple(processes_spec[i][0], processes_spec[i][1])\n\n # Input was NOT provided on command line, so get it from the process\n if input is None:\n process = processes_spec[i].process\n process_input = []\n for process_input_state in process.processInputStates:\n process_input.extend(process_input_state.value)\n processes_spec[i] = ProcessTuple(process, process_input)\n # Input was provided on command line, so assign that to input item of tuple\n else:\n # Assign None as input to processes implemented by controller (controller provides their input)\n # (e.g., prediction processes implemented by EVCMechanism)\n if processes_spec[i].process._isControllerProcess:\n processes_spec[i] = ProcessTuple(processes_spec[i].process, None)\n else:\n # MODIFIED 2/8/17 NEW:\n # Replace input item in tuple with one from command line\n # Note: check if origin mechanism for current process is same as any previous one;\n # if it is, use that one (and don't increment index for input\n # otherwise, assign input and increment input_index\n try:\n input_index_curr = orig_mechs_already_processed.index(processes_spec[i][0].originMechanisms[0])\n except ValueError:\n input_index += 1\n processes_spec[i] = ProcessTuple(processes_spec[i].process, input[input_index_curr])\n input_index_curr = input_index\n # MODIFIED 2/8/17 END\n\n # Validate input\n if (processes_spec[i].input is not None and\n not isinstance(processes_spec[i].input,(numbers.Number, list, np.ndarray))):\n raise SystemError(\"Second item of entry {0} ({1}) must be an input value\".\n format(i, processes_spec[i].input))\n\n process = processes_spec[i].process\n process_input = processes_spec[i].input\n\n # # MODIFIED 2/8/17 OLD: [MOVED ASSIGNMENT OF self.variable TO _instantiate_graph()\n # # SINCE THAT IS WHERE SYSTEM'S ORIGIN MECHANISMS ARE IDENTIFIED]\n # self.variable.append(process_input)\n # # MODIFIED 2/8/17 END\n\n # IMPLEMENT: THIS IS WHERE LEARNING SPECIFIED FOR A SYSTEM SHOULD BE IMPLEMENTED FOR EACH PROCESS IN THE\n # SYSTEM; NOTE: IF THE PROCESS IS ALREADY INSTANTIATED WITHOUT LEARNING\n # (FIRST CONDITIONAL BELOW), MAY NEED TO BE RE-INSTANTIATED WITH LEARNING\n # (QUESTION: WHERE TO GET SPECS FOR PROCESS FOR RE-INSTANTIATION??)\n\n # If process item is a Process object, assign process_input as default\n if isinstance(process, Process):\n if process_input is not None:\n 
process._assign_defaults(variable=process_input, context=context)\n # If learning_rate is specified for system but not for process, then apply to process\n # # MODIFIED 3/21/17 OLD:\n # if self.learning_rate and not process.learning_rate:\n # # FIX: assign_params WANTS TO CREATE A ParamaterState ON process FOR learning_rate\n # process.assign_params(request_set={LEARNING_RATE:self.learning_rate})\n # # MODIFIED 3/21/17 NEW:[learning_rate SHOULD BE NOT BE RE-ASSIGNED FOR PROCESS, BUT RATHER ON EXECUTE]\n # if self.learning_rate is not None and process.learning_rate is None:\n # process.learning_rate = self.learning_rate\n # # MODIFIED 3/21/17 END\n\n # Otherwise, instantiate Process\n else:\n if inspect.isclass(process) and issubclass(process, Process):\n # FIX: MAKE SURE THIS IS CORRECT\n # Provide self as context, so that Process knows it is part of a System (and which one)\n # Note: this is used by Process._instantiate_pathway() when instantiating first Mechanism\n # in Pathway, to override instantiation of projections from Process.input_state\n process = Process(default_input_value=process_input,\n learning_rate=self.learning_rate,\n context=self)\n elif isinstance(process, dict):\n # IMPLEMENT: HANDLE Process specification dict here;\n # include process_input as ??param, and context=self\n raise SystemError(\"Attempt to instantiate process {0} in kwProcesses of {1} \"\n \"using a Process specification dict: not currently supported\".\n format(process.name, self.name))\n else:\n raise SystemError(\"Entry {0} of kwProcesses ({1}) must be a Process object, class, or a \"\n \"specification dict for a Process\".format(i, process))\n\n # # process should now be a Process object; assign to processList\n # self.processList.append(process)\n\n # Assign the Process a reference to this System\n process.systems.append(self)\n if process.learning:\n self.learning = True\n\n # Get max of Process phaseSpecs\n self._phaseSpecMax = int(max(math.floor(process._phaseSpecMax), self._phaseSpecMax))\n\n # Iterate through mechanism tuples in Process' mech_tuples\n # to construct self._all_mech_tuples and mechanismsDict\n # FIX: ??REPLACE WITH: for sender_mech_tuple in Process._mech_tuples\n for sender_mech_tuple in process._mech_tuples:\n\n sender_mech = sender_mech_tuple.mechanism\n\n # THIS IS NOW DONE IN _instantiate_graph\n # # Add system to the Mechanism's list of systems of which it is member\n # if not self in sender_mech_tuple[MECHANISM].systems:\n # sender_mech.systems[self] = INTERNAL\n\n # Assign sender mechanism entry in self.mechanismsDict, with mech_tuple as key and its Process as value\n # (this is used by Process._instantiate_pathway() to determine if Process is part of System)\n # If the sender is already in the System's mechanisms dict\n if sender_mech_tuple.mechanism in self.mechanismsDict:\n existing_mech_tuple = self._allMechanisms._get_tuple_for_mech(sender_mech)\n if not sender_mech_tuple is existing_mech_tuple:\n # Contents of tuple are the same, so use the tuple in _allMechanisms\n if (sender_mech_tuple.phase == existing_mech_tuple.phase and\n sender_mech_tuple.params == existing_mech_tuple.params):\n pass\n # Contents of tuple are different, so raise exception\n else:\n if sender_mech_tuple.phase != existing_mech_tuple.phase:\n offending_tuple_field = 'phase'\n offending_value = PHASE_ITEM\n else:\n offending_tuple_field = 'process_input'\n offending_value = PARAMS_ITEM\n raise SystemError(\"The same mechanism in different processes must have the same parameters:\"\n \"the {} ({}) 
for {} in {} does not match the value({}) in {}\".\n format(offending_tuple_field,\n sender_mech_tuple.mechanism,\n sender_mech_tuple[offending_value],\n process,\n existing_mech_tuple[offending_value],\n self.mechanismsDict[sender_mech_tuple.mechanism]\n ))\n # Add to entry's list\n self.mechanismsDict[sender_mech].append(process)\n else:\n # Add new entry\n self.mechanismsDict[sender_mech] = [process]\n if not sender_mech_tuple in self._all_mech_tuples:\n self._all_mech_tuples.append(sender_mech_tuple)\n\n process._allMechanisms = MechanismList(process, tuples_list=process._mech_tuples)\n\n # # MODIFIED 2/8/17 OLD: [SEE ABOVE]\n # self.variable = convert_to_np_array(self.variable, 2)\n # # MODIFIED 2/8/17 END\n #\n # # Instantiate processList using process_tuples, and point self.processes to it\n # # Note: this also points self.params[kwProcesses] to self.processes\n self.process_tuples = processes_spec\n self._processList = ProcessList(self, self.process_tuples)\n self.processes = self._processList.processes", "async def add_process(self, ctx, process, name):\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f\"The process {process} is already being displayed\")\n elif name in PROCESSES.values():\n await ctx.send(f\"The process name {name} is already being displayed\")\n\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been added\")", "def add_child(self, m, p):\n\t\tn = Node(move=m, parent=self, player_just_moved=p)\n\t\tself.child_nodes.append(n)\n\t\treturn n", "def add_node(self, nnode, value):\n new_node = Node(nnode, value)\n self.vert_dict[nnode] = new_node\n return new_node", "def _create_process(self, process, name):\n def _run():\n try:\n while True:\n process.loop()\n except KeyboardInterrupt:\n pass\n except:\n self._logger.exception('Process %s died!', name)\n return ProcessEnvironment().create_process(_run, name)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(sys.argv[1], 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_vm(self, account, vminfo):\n node = self.driver(account).ex_create_node_from_template(\n name=vminfo.vm_name,\n template=vminfo.vsphere.template,\n )\n\n vminfo.vm_id = node.id\n\n return node.id", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, pbfilename), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def _spawn_service_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or process_instance.name\n log.debug(\"Service Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n # Service RPC endpoint\n rsvc1 = self._create_listening_endpoint(node=self.container.node,\n from_name=listen_name,\n process=process_instance)\n # Named local RPC endpoint\n rsvc2 = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue\n cleanup = lambda _: self._cleanup_method(process_instance.id, rsvc2)\n\n # Start an ION process with the right kind of endpoint factory\n 
proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc1, rsvc2],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_service_process for %s\" % \",\".join((listen_name, process_instance.id)))\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def add_node(graph, node_name, label, shape='record', style='filled', fillcolor='lightgrey'):\n node = Node(name=node_name, shape=shape, style=style, fillcolor=fillcolor, label=label)\n graph.add_node(node)\n return node", "def appendProcess(self, pid: int, numberOfVariables: int, processTable, diagnostics) -> int:\n self.memory[pid] = []\n\n for _i in range(numberOfVariables):\n self.memory[pid].append(MemoryItem())\n\n return 0", "def _addEntity(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n # Find subject\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n if child.func in SubDict:\n sub = child\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n sub = None\n if sub:\n self._addNode(parent, sub=sub.main)\n self._addEdge(sub.main, parent.main, label=\"陳述\", etype=\"stat\")\n else:\n self._addNode(parent)\n \n # Lopp through all children\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # If child is noun\n if child.func in SubDict:\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")\n elif child.type == 0 and child.func in [\"と\", \"などと\"] and child.id + 1 == parent.id and preprocessText(chunks[parent.parent].main) not in [\"交代\", \"交換\"]:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n elif child.type == 0 and child.func in ParallelDict and child.id + 1 == parent.id:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")", "def insert(self, pid, pname, pparent, pobj, ptype):\r\n self.pids.append(pid)\r\n self.pnames.append(pname)\r\n self.pparents.append(pparent)\r\n self.ptypes.append(ptype)\r\n self.pobjs.append(pobj)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n\n # with tf.gfile.FastGFile(os.path.join(\n # FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'output_graph.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def 
insert_node(self, node_tup):\n signature = hashlib.sha256((node_tup[0]+node_tup[4]).encode('utf-8')).hexdigest()\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"INSERT INTO nodes VALUES (:ip, :port, uname, :signature)\", {\"ip\":node_tup[0], \"port\":node_tup[1], \"uname\":node_tup[2], \"verifying_key\":node_tup[3]})\n app_process.commit()\n app_process.close()", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def addNode(self, nTag, pkg, exe, args, name, namespace):\r\n try:\r\n validateName(nTag)\r\n except IllegalName:\r\n raise InvalidRequest('Node tag is not a valid.')\r\n\r\n if nTag in self._nodes:\r\n raise InvalidRequest(\"Can not use the same node tag '{0}' in the \"\r\n 'same container twice.'.format(nTag))\r\n\r\n node = self._obj.createNode(pkg, exe, args, name, namespace)\r\n self._nodes[nTag] = node\r\n node.notifyOnDeath(self._nodeDied)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create(self):\n\n self.init_nodes_db()\n\n # group data\n if len(self.depends) == 0:\n grouped = [((), self.data)]\n else:\n grouped = self.data.groupby(self.depends)\n\n # create all the pymc nodes\n for uniq_elem, grouped_data in grouped:\n if not isinstance(uniq_elem, tuple):\n uniq_elem = (uniq_elem,)\n\n # create new kwargs to pass to the new pymc node\n kwargs = self.kwargs.copy()\n\n # update kwarg with the right parent\n for name, parent in self.parents.items():\n kwargs[name] = parent.get_node(self.depends, uniq_elem)\n\n # get node name\n tag, subj_idx = self.create_tag_and_subj_idx(self.depends, uniq_elem)\n node_name = self.create_node_name(tag, subj_idx=subj_idx)\n\n # get value for observed node\n if self.observed:\n if self.pass_dataframe:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ] # .to_records(index=False)\n else:\n kwargs[\"value\"] = grouped_data[\n self.col_name\n ].values # .to_records(index=False)\n\n # Deterministic nodes require a parent argument that is a\n # dict mapping parent names to parent nodes. 
Knode wraps\n # this; so here we have to fish out the parent nodes from\n # kwargs, put them into a parent dict and put that back\n # into kwargs, which will make pm.Determinstic() get a\n # parent dict as an argument.\n if self.pymc_node is pm.Deterministic:\n parents_dict = {}\n for name, parent in self.parents.items():\n parents_dict[name] = parent.get_node(self.depends, uniq_elem)\n kwargs.pop(name)\n kwargs[\"parents\"] = parents_dict\n\n if self.observed:\n kwargs[\"parents\"][\"value\"] = kwargs[\"value\"]\n\n # Deterministic nodes require a doc kwarg, we don't really\n # need that so if its not supplied, just use the name\n if self.pymc_node is pm.Deterministic and \"doc\" not in kwargs:\n kwargs[\"doc\"] = node_name\n\n node = self.create_node(node_name, kwargs, grouped_data)\n\n if node is not None:\n self.nodes[uniq_elem] = node\n self.append_node_to_db(node, uniq_elem)", "def create(self, context=None):\n values = self.obj_get_changes()\n db_host = self.dbapi.host_create(context, values)\n self._from_db_object(context, self, db_host)", "def add_node(self, node: Node):\n prop_str = \",\\n\".join([\"n.%s = '%s'\" % (k, v) for k, v in node.data.items()])\n query = \"\"\"\n MERGE (n:%s {id: '%s'})\n SET %s\n \"\"\" % (\n node.labels,\n norm_id(node.db_ns, node.db_id),\n prop_str,\n )\n return self.create_tx(query)", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def addOnCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass", "def add_node(self, metadata, pos):\n node = Node(metadata, pos)\n self.addItem(node)\n self.nodes[node.id] = node\n return node", "def __init__(self, pid, binary_path, host_name, node_name, telemetry):\n self.pid = pid\n self.binary_path = binary_path\n self.host_name = host_name\n self.node_name = node_name\n self.telemetry = telemetry", "def make_comms(self,comm):\n # For masters we let child_comm be the communicator used to message the node's \n # children, and parent_comm be that used to message the node's parents.\n\n parent_rank = 0\n\n # Case (1)\n if self.num_masters > 1:\n self.make_comms_many(comm)\n if self.is_master:\n parent_comm = self.comm_masters\n if self.comm_masters.Get_rank() == 0: # rank 0 is the super-master\n child_comm = self.comm_masters\n parent_rank = None\n else:\n child_comm = self.comm_block\n # Case (2)\n else:\n self.make_comm_single(comm)\n if self.is_master:\n parent_comm = self.comm_block\n child_comm = self.comm_block\n parent_rank = None\n\n # Process initialization\n from .MPIProcess import MPIWorker, MPIMaster\n if self.is_master:\n self.set_val_data()\n num_sync_workers = self.get_num_sync_workers(child_comm)\n self.process = MPIMaster( parent_comm, parent_rank=parent_rank, \n data=self.data, child_comm=child_comm, num_epochs=self.num_epochs,\n num_sync_workers=num_sync_workers, callbacks=self.callbacks )\n else:\n self.set_train_data()\n self.process = MPIWorker( parent_comm=self.comm_block, parent_rank=parent_rank, \n num_epochs=self.num_epochs, data=self.data, callbacks=self.callbacks )", "def create_nodes(self, topogramId, nodes):\n assert type(nodes) is list\n return self.make_request(\"POST\", \"nodes\", { \"topogramId\" : topogramId, \"nodes\" : nodes})", "def do_create(self, line):\n if line:\n if line in HBNBCommand.classes:\n class_to_ins = HBNBCommand.classes.get(line)\n new_instance = class_to_ins()\n new_instance.save()\n print(new_instance.id)\n else:\n print(\"** class name missing **\")\n else:\n print(\"** class doesn't exist **\")", "def _spawn_immediate_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n self._process_init(process_instance)\n self._process_start(process_instance)\n return process_instance", "def create(self, substrate, comment=''):\n process_tree = ProcessNode.objects.create(process=None, piece='a')\n sample = self.model(substrate=substrate, comment=comment,\n process_tree=process_tree)\n sample.save()\n\n return sample", "def create_process(\n target: typing.Callable, args: tuple = (), prepend_lock: bool = False\n) -> Process:\n if prepend_lock:\n args = 
(LOCK,) + tuple(args)\n process = multiprocessing.Process(target=target, args=args)\n return process", "def create_graph():\n with tf.gfile.FastGFile(os.path.join(\n config['inference']['model_dir'], 'output_graph.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def add_node(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties={}\r\n\t\t# may change method sig of Node since we can always combine arguments\r\n\t\t# here\r\n\t\tnode = Node(self._nextid, properties, **kwargs)\r\n\t\tself._nodes[self._nextid] = node\r\n\t\tself._nextid += 1\r\n\t\treturn node", "def create_entity(self):\n \n if self.ORION_CB.get_entity(self.params['name']) is None:\n \n print('[INFO]: Create new PID entity')\n \n entity_dict = {\"id\":self.params['name'], \"type\":'PID_controller'}\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n entity_dict.update({attr:{'value':self.params[attr],'type':'Number'}})\n\n entity_dict.update({'reverse_act':{'value':self.params['reverse_act'],'type':'Text'}})\n \n entity = filip.orion.Entity(entity_dict)#, attrs)\n\n self.ORION_CB.post_entity(entity)\n \n else:\n print('Entity name already assigned')", "def add_master_process(self, value=u\"on\"):\n path = [u\"master_process\"]\n self.add_config_item(self._nodeconfig, value, path)", "def install_node_instance_subgraph(ctx,instance, graph, hist=None):\n subgraph = graph.subgraph('install_{0}'.format(instance.id))\n\n ct=None\n if hist:\n #get completed tasks for instance\n ct=_completed_tasks(ctx,hist,instance.id)\n\n sequence = subgraph.sequence()\n\n #CREATE\n run=True\n if(hist and 'create' in ct):\n run=False\n\n ctx.logger.info(\"run={} CREATE {}\".format(str(run),instance.id))\n if(run):\n ctx.logger.info(\" hist={} ct={}\".format(str(hist),str(ct)))\n\n if(run):\n sequence.add(\n instance.set_state('initializing'),\n forkjoin(instance.send_event('Creating node'),\n instance.set_state('creating')),\n _add_es_log(ctx,instance,'create',instance.execute_operation('cloudify.interfaces.lifecycle.create')),\n instance.set_state('created'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.preconfigure'\n )))\n\n #CONFIGURE\n run=True\n if(hist and 'configure' in ct):\n run=False\n\n ctx.logger.info(\"run={} CONFIGURE {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('configuring'),\n instance.send_event('Configuring node')),\n _add_es_log(ctx,instance,'configure',instance.execute_operation('cloudify.interfaces.lifecycle.configure')),\n instance.set_state('configured'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.postconfigure'\n )))\n\n # STARTING\n run=True\n if(hist and 'start' in ct):\n run=False\n\n ctx.logger.info(\"run={} START {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('starting'),\n instance.send_event('Starting node')),\n instance.execute_operation('cloudify.interfaces.lifecycle.start'))\n\n # If this is a host node, we need to add specific host start\n # tasks such as waiting for it to start and installing the agent\n # worker (if necessary)\n if run and is_host_node(instance):\n sequence.add(*_host_post_start(instance))\n\n sequence.add(\n forkjoin(\n _add_es_log(ctx,instance,'start',instance.execute_operation('cloudify.interfaces.monitoring.start')),\n *_relationships_operations(\n instance,\n 
'cloudify.interfaces.relationship_lifecycle.establish'\n )),\n instance.set_state('started'))\n\n subgraph.on_failure = get_install_subgraph_on_failure_handler(ctx,instance)\n return subgraph", "def gdb_add_node(node, gdb, rdf, owl):\n gdb_node = gdb.nodes.create()\n node.set_node(gdb_node)\n gdb_node.labels.add([label.split('#')[-1] for label in node.get_labels()])\n for _, pro, obj in rdf.triples((node.get_uri(), None, None)):\n if (pro, RDF.type, owl.DatatypeProperty) in rdf:\n prop_name = pro.split('#')[-1]\n value = obj.split('#')[-1]\n gdb_node.set(prop_name, value)", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> Node:\r\n return Node(graph=self._graph, index=index, name=name, external_id=external_id)", "def __init__(self, operator_id: str=None, process_id: str=None): # noqa: E501\n self.swagger_types = {\n 'operator_id': str,\n 'process_id': str\n }\n\n self.attribute_map = {\n 'operator_id': 'operatorId',\n 'process_id': 'processId'\n }\n self._operator_id = operator_id\n self._process_id = process_id", "def apply(tree, parameters=None):\r\n from pm4py.objects.bpmn.bpmn_graph import BPMN\r\n counts = Counts()\r\n bpmn = BPMN()\r\n start_event = BPMN.StartEvent(name=\"start\", isInterrupting=True)\r\n end_event = BPMN.EndEvent(name=\"end\")\r\n bpmn.add_node(start_event)\r\n bpmn.add_node(end_event)\r\n bpmn, counts, _, _ = recursively_add_tree(tree, tree, bpmn, start_event, end_event, counts, 0)\r\n bpmn = delete_tau_transitions(bpmn, counts)\r\n\r\n return bpmn", "def create_graph():\n\t# Creates graph from saved graph_def.pb.\n\twith tf.gfile.FastGFile('./models/inception_v4.pb', 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\t_ = tf.import_graph_def(graph_def, name='')", "def make_node(self, node_property):\n # Try except because Ubigraph is old as hell!\n try: n = self.G.new_vertex()\n except: pass\n for prop, val in node_property.items():\n try: self.G.set_vertex_attribute(n, prop, val)\n except: return make_node(node_property)\n return n", "def create_graph():\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, FLAGS.model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def test_getStateIncludesProcesses(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertEqual(self.pm.__getstate__()['processes'],\r\n {'foo': (['arg1', 'arg2'], 1, 2, {})})", "def create(cls, user, pool, run_name, experiment, sequencer,\n fwd_cycles, rev_cycles, assay, principal_investigator,\n lanes=None, contacts=None):\n with sql_connection.TRN as TRN:\n # Add the row to the process table\n process_id = cls._common_creation_steps(user)\n\n if fwd_cycles <= 0 or not isinstance(fwd_cycles, int):\n raise ValueError(\"fwd_cycles must be > 0\")\n if rev_cycles <= 0 or not isinstance(rev_cycles, int):\n raise ValueError(\"rev_cycles must be > 0\")\n\n # Add the row to the sequencing table\n sql = \"\"\"INSERT INTO qiita.sequencing_process\n (process_id, pool_composition_id, run_name, experiment,\n sequencer_id, fwd_cycles, rev_cycles, assay,\n principal_investigator, lanes)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING sequencing_process_id\"\"\"\n TRN.add(sql, [process_id, pool.id, run_name, experiment,\n 
sequencer.id, fwd_cycles, rev_cycles, assay,\n principal_investigator.id, dumps(lanes)])\n instance = cls(TRN.execute_fetchlast())\n\n if contacts:\n sql = \"\"\"INSERT INTO qiita.sequencing_process_contacts\n (sequencing_process_id, contact_id)\n VALUES (%s, %s)\"\"\"\n sql_args = [[instance.id, c.id] for c in contacts]\n TRN.add(sql, sql_args, many=True)\n TRN.execute()\n\n return instance", "def create(cls, original_args, process_args, base_url, host_url, services):\n return cls(original_args, process_args, base_url, host_url, services)", "def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])", "def start_process(check_id, storage, processes):\n\n process = Process(target=perform_check, args=(check_id,\n storage[check_id]['command'],\n storage[check_id]['freq'],))\n\n # Set process name to \"Process+check_id\"\n process.name = 'Process{}'.format(check_id)\n process.start()\n\n # Add process to processes dict with key=pid and value=processname\n processes[process.pid] = process\n storage[check_id]['pid'] = process.pid", "def add_node(self, node: base.Node, label: str = None) -> HandleType:\n\n if self._current_group:\n if label and label != self._current_group:\n raise ValueError('The given label does not match the current group: '\n f'{label} vs {self._current_group}.')\n label = self._current_group\n else:\n if not label:\n raise ValueError('Label should not be empty.')\n if label not in self._groups:\n self._groups[label] = [node]\n else:\n self._groups[label].append(node)\n return node.create_handle()", "def _internal_add_node(self,\r\n node_name: str,\r\n external_id: Optional[str] = None,\r\n are_neighbors_cached: bool = False,\r\n add_to_cache: bool = False) -> None:\r\n index: int = len(self)\r\n node: Node = self._create_node(index, node_name, external_id)\r\n node.are_neighbors_cached = are_neighbors_cached\r\n self._nodes[index] = node\r\n self._node_name_map[node_name] = node\r\n\r\n if add_to_cache:\r\n db: GraphDatabaseInterface = self._graph.database\r\n db_node: DBNode = db.Node.find_by_name(node.name)\r\n if db_node is None:\r\n db_node = db.Node(node.name, node.external_id)\r\n db_node.are_neighbors_cached = False\r\n db.session.add(db_node)\r\n db.session.commit()", "def create_node(self, *labels, **props):\n component = Node(*labels, **props)\n self.graph.create(component)\n return component" ]
[ "0.6773554", "0.64495486", "0.60057384", "0.5933061", "0.5871152", "0.58102006", "0.5744983", "0.57229316", "0.5719331", "0.56770027", "0.5667292", "0.5577523", "0.5570289", "0.55650806", "0.55393213", "0.55308384", "0.5441535", "0.5393277", "0.5389408", "0.5335206", "0.5284067", "0.52781135", "0.5264026", "0.5255529", "0.51701474", "0.511998", "0.5096278", "0.50743943", "0.506606", "0.5064148", "0.50633234", "0.5059192", "0.5045069", "0.50444794", "0.5037553", "0.50362134", "0.502722", "0.5003698", "0.49956", "0.499276", "0.49727276", "0.49715364", "0.4961798", "0.49591076", "0.49442244", "0.49442202", "0.49438566", "0.49435452", "0.49377686", "0.49154758", "0.49147627", "0.48982224", "0.48978114", "0.48890415", "0.48839453", "0.4880518", "0.48703298", "0.48683643", "0.48589337", "0.48584604", "0.48531654", "0.48395008", "0.48334613", "0.48222396", "0.4819775", "0.48018578", "0.47913826", "0.4790842", "0.47895315", "0.47875214", "0.47731692", "0.47710845", "0.47624952", "0.4760939", "0.4754353", "0.47496465", "0.4749525", "0.47466192", "0.4736989", "0.47369385", "0.47369376", "0.4736794", "0.47308484", "0.47233763", "0.47072658", "0.46985626", "0.46936235", "0.46898368", "0.4682593", "0.46811616", "0.46714586", "0.4671449", "0.46701804", "0.4669723", "0.46670312", "0.46582624", "0.4652312", "0.46515527", "0.46448004", "0.46420422" ]
0.7839599
0
Reset all adjacency information and rebuild it after adding nodes. This implements a collection of sanity checks (making sure every subscribed topic has a publisher, etc.) which, if they all pass, mean we can safely / sanely construct the dependency graph.
def build(self):
    self.logger.info('Rebuilding adjacency information')

    self.edges = collections.defaultdict(list)

    topic_to_publisher = collections.defaultdict(list)
    topic_to_subscribers = collections.defaultdict(list)
    node_to_missing_deps = collections.defaultdict(list)

    result = True

    for node in self.nodes.values():
        for topic in node.provided_topics.keys():
            topic_to_publisher[topic].append(node)
        for topic in node.required_topics:
            topic_to_subscribers[topic].append(node)
        for dep in node.additional_dependencies:
            if dep not in self.nodes:
                node_to_missing_deps[node].append(dep)

    if len(node_to_missing_deps) > 0:
        result = False
        msg = io.StringIO()
        print('Found [{}] managed processes with missing dependencies'.format(len(node_to_missing_deps)), file=msg)
        fmt = ' Managed process [{}] is missing [{}]'
        for (node, missing) in node_to_missing_deps.items():
            print(fmt.format(node.name, ', '.join(missing)), file=msg)
        self.logger.error(msg.getvalue())

    missing_publishers = []
    for topic in topic_to_subscribers.keys():
        if topic not in topic_to_publisher:
            missing_publishers.append(topic)

    if len(missing_publishers) > 0:
        result = False
        msg = io.StringIO()
        print('Found [{}] topics that do not have publishers'.format(len(missing_publishers)), file=msg)
        fmt = ' Topic [{}] with subscribers [{}]'
        for topic in missing_publishers:
            print(fmt.format(topic, ', '.join([x.name for x in topic_to_subscribers[topic]])), file=msg)
        self.logger.error(msg.getvalue())

    if not result:
        self.logger.error('Found errors when building adjacency information')
        raise GraphBuildError(
            'Found errors when building adjacency information / graph edges. Check log for details')

    # Now we have enough information to build our edges. Phase 1: pub/sub stuff
    for (topic, subscribers) in topic_to_subscribers.items():
        publishers = topic_to_publisher[topic]
        for p in publishers:
            for s in subscribers:
                self.edges[p].append(s)

    # Phase 2: additional dependencies
    for node in self.nodes.values():
        for dep in node.additional_dependencies:
            src = self.nodes[dep]
            self.edges[src].append(node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_state(self):\n # Directed graph, (u, v) => v depends on u. u, v are pairs of (rule_name, rule_dir_abs)\n # Used for generating Topological Sort\n self._rule_to_dependency_graph_adjlist = {}\n self._topologically_sorted_build_rule_names = []\n\n # List of (dependency_name, dependency_dir_abs) for each build rule\n self._rule_to_dependency_list = {}\n\n # Space for rough work :P\n self._unresolved_commands = set()", "def ResetGraph(self):\n self.nodes = []\n self.edges = []\n self.connections = []\n Node.resetNodeCount()\n Edge.resetEdgesCount()", "def reset_graph(self):\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)", "def clear(self):\r\n self.nodes = collections.defaultdict(list)\r\n self.nodes_mapping = collections.defaultdict(list)\r\n self.edges = 0\r\n #self.children_length={}\r\n self.parents_length = collections.defaultdict(lambda : collections.defaultdict(int))", "def populate_graph(self):\n if self.edges and self.vertices:\n graph = Graph()\n for edge in self.edges:\n graph.add_edge(edge)\n self.graph = graph\n else:\n print(\"Populate edges & vertices first, then populate graph!\")", "def reset_graph(self):\n self.graph = OrderedDict()", "def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]", "def _restoreGraph(self):\n\n # self.tempG = self.g.copy()\n\n if nx.is_directed(self.g):\n self.tempG = nx.DiGraph(self.g)\n else:\n self.tempG = nx.Graph(self.g)\n self.deletedEdges = []\n self.deletedNodes = []", "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def __init__(self):\n self._adjacency_list = {\n\n }", 
"def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self", "def reset(self):\n\t\tself.graph = OrderedDict()\n\t\tself.bottoms = OrderedDict()\n\t\tself.output_shape = OrderedDict()\n\t\tself.cur_tensor = None\n\t\tself.cur_id = None\n\t\tself.tmp_list = []\n\t\tself.log_init()", "def reset_graph(self):\n raise NotImplementedError", "def complete_graph(self):\n root_nodes = set()\n\n for name, a_block in self.wf['action'].items():\n\n a_block['name'] = name\n\n for n in a_block.get('needs', []):\n if not self.wf['action'][n].get('next', None):\n self.wf['action'][n]['next'] = set()\n self.wf['action'][n]['next'].add(name)\n\n if not a_block.get('needs', None):\n root_nodes.add(name)\n\n self.wf['root'] = root_nodes", "def start_new_graph(self):\n self.nodes = {}\n self.reset_graph()", "def populate_graph(self):", "def _clean_graph(self):\n for entry_node in self._entry_nodes:\n self._clean_graph_visit(entry_node.get_func_first_node(), {})", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def rebuild_graph_data(self, consolidator=None):\n\n if len(self.nodes) == 0:\n raise DomainException(\"No nodes supplied to graph!\")\n\n if 
consolidator != None:\n for node in self.nodes.values():\n na = set()\n nat = set()\n no = set()\n nr = set()\n ni = set()\n nir = set()\n nrc = Counter()\n for atype, attribute in node.attributes:\n try:\n atype = consolidator(atype)\n except ConsolidatorException:\n continue\n na.add((atype, attribute))\n nat.add(atype)\n\n for rtype, dest in node.outgoing_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n no.add((rtype, dest))\n nr.add(rtype)\n nrc[rtype] += 1\n \n for rtype, pred in node.incoming_relations:\n try:\n rtype = consolidator(rtype)\n except ConsolidatorException:\n continue\n ni.add((rtype, pred))\n nir.add(rtype)\n nrc[rtype] += 1\n\n #update values\n node.attributes = na\n node.outgoing_relations = no\n node.incoming_relations = ni\n node.rtypes = nr\n node.i_rtypes = nir\n node.atypes = nat\n node.rtype_count = nrc\n\n # ==== compute member variables ====\n self.usage_map = self.map_uses()\n self.usage_counts = {x:len(y) for x,y in self.usage_map.items()}\n self.rtype_vectors = self.index_rtypes()\n self.node_vectors = self.index_nodes()\n self.rkdtree_keys, _rvalues = zip(*self.rtype_vectors.items())\n self.rkdtree = cKDTree(_rvalues)\n self.nkdtree_keys, _nvalues = zip(*self.node_vectors.items())\n self.nkdtree = cKDTree(_nvalues)\n\n # ==== precompute some vector constructs ====\n for node in self.nodes.values():\n node.compute_dicts(self)\n\n # ==== compute tf-idf weights for all nodes ====\n\n #calculate number of nodes containing rtype and \n #find maximum frequency rtype for any single node\n maxftd = 0\n c2 = Counter()\n for y in self.nodes.values():\n for k,z in y.rtype_count.items():\n c2[k] += 1\n if z > maxftd:\n maxftd = z\n\n #calculate augmented term frequency\n tf = Counter()\n for x,y in self.nodes.items():\n for z,v in y.rtype_count.items():\n tf[(x,z)] = 0.5 + 0.5*(v/maxftd)\n\n #calculate inverse document frequency\n idf = Counter()\n N = len(self.nodes)\n for x in c2:\n idf[x] = log(N / c2[x])\n\n tfidf = {}\n for x,y in self.nodes.items():\n for z in y.rtype_count:\n tmp = tfidf.setdefault(x,{})\n tmp[z] = tf[(x,z)] * idf[z]\n\n self.tfidf = tfidf\n self.dirty = False", "def prepare(self):\n if self._ready_nodes is not None:\n raise ValueError(\"cannot prepare() more than once\")\n\n self._ready_nodes = [\n i.node for i in self._node2info.values() if i.npredecessors == 0\n ]\n # ready_nodes is set before we look for cycles on purpose:\n # if the user wants to catch the CycleError, that's fine,\n # they can continue using the instance to grab as many\n # nodes as possible before cycles block more progress\n cycle = self._find_cycle()\n if cycle:\n raise CycleError(f\"nodes are in a cycle\", cycle)", "def _reset_gradients(self):\n self.grad = None # gradient itself\n self.grad_fn = None # functions to call for gradient\n self.grad_expected = 0 # number of gradients expected from parents\n self.grad_received = 0 # number of gradients received from parents\n self.children = [] # children of node in graph\n self.ctx = AutogradContext() # contexts for AutogradFunctions", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def clean_local_memory(self):\n self.namespaces = self._rdf_builder.namespaces\n self.dataset = self._rdf_builder.dataset\n\n self.ontology_graph = self._rdf_builder.ontology_graph\n self.instance_graph = self._rdf_builder.instance_graph\n self.claim_graph = self._rdf_builder.claim_graph\n self.perspective_graph = self._rdf_builder.perspective_graph\n 
self.interaction_graph = self._rdf_builder.interaction_graph", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def init_graph(self):\n import dgl\n\n adj_list = []\n for rel_type in range(1, self.n_relations, 1):\n edge_idxs = self.ckg.filter_edges(\n lambda edge: edge.data[\"relation_id\"] == rel_type\n )\n sub_graph = (\n dgl.edge_subgraph(self.ckg, edge_idxs, preserve_nodes=True)\n .adjacency_matrix(transpose=False, scipy_fmt=\"coo\")\n .astype(\"float\")\n )\n rowsum = np.array(sub_graph.sum(1))\n d_inv = np.power(rowsum, -1).flatten()\n d_inv[np.isinf(d_inv)] = 0.0\n d_mat_inv = sp.diags(d_inv)\n norm_adj = d_mat_inv.dot(sub_graph).tocoo()\n adj_list.append(norm_adj)\n\n final_adj_matrix = sum(adj_list).tocoo()\n indices = torch.LongTensor([final_adj_matrix.row, final_adj_matrix.col])\n values = torch.FloatTensor(final_adj_matrix.data)\n adj_matrix_tensor = torch.sparse.FloatTensor(indices, values, self.matrix_size)\n return adj_matrix_tensor.to(self.device)", "def reset_edges(self):\n super().reset_edges()\n\n # If we're in default state, notheing to rest\n if self._modified_weighted_adj_matrices is None:\n return\n\n # Degrees are reset, so we need to reset the original weight scaling\n if self.scale_weights and not self.scaling_skipped:\n self._scale_weights_to_degree()\n self._generate_weighted_adj_matrices()\n else:\n # No weight scaling so just load prev values from cache\n self.weighted_adj_matrices = {**self.weighted_adj_matrices, **self._modified_weighted_adj_matrices}\n self._modified_weighted_adj_matrices = None", "def setup_ant(self):\n self.visited_nodes[1:] = []\n self.actual_node = self.start_pos", "def __init__(self):\n self._graph = DirectedGraph()\n self._graph_copies = []", "def prepare_graph(\n self,\n adjacency,\n weights,\n weighted=False,\n undirected=False,\n force_dense=True,\n noselfloop=True,\n verbose=True,\n ):\n\n # df_adj = pd.read_csv(in_folder + adj_name, index_col=0) # read adjacency file\n print(\"\\nAdjacency shape: {0}\".format(adjacency.shape), flush=True)\n\n # create the graph adding nodes and edges\n A = self.read_graph(\n adj=adjacency,\n weights=weights,\n weighted=weighted,\n undirected=undirected,\n noselfloop=noselfloop,\n verbose=verbose,\n )\n\n nodes = list(A[0].nodes)\n print(\"\\nNumber of nodes =\", len(nodes), flush=True)\n print(\"Number of layers =\", len(A), flush=True)\n if verbose:\n self.print_graph_stat(A)\n\n # save the multilayer network in a tensor with all layers\n if force_dense:\n B = self.build_B_from_A(A, nodes=nodes)\n else:\n B = self.build_sparse_B_from_A(A)\n\n return A, B, nodes", "def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1", "def finalize(self):\n # Establish forward/reverse star lists, set travel times to free-flow\n for i in self.node:\n self.node[i].forwardStar = list()\n self.node[i].reverseStar = list()\n \n for ij in self.link:\n self.node[self.link[ij].tail].forwardStar.append(ij)\n self.node[self.link[ij].head].reverseStar.append(ij)\n self.link[ij].cost = self.link[ij].freeFlowTime + self.link[ij].length * self.distanceFactor + self.link[ij].toll * self.tollFactor\n self.link[ij].flow = 0\n \n for OD in self.ODpair:\n self.ODpair[OD].leastCost = 0", "def reset_edges(self):\n\n # Ensure original edges are stored in cache, otherwise nothing to do.\n if 
self._modified_edges is None or self._weighted_modified_edges is None:\n return\n\n # Restore the former value from cache\n self.adj_matrices = {**self.adj_matrices, **self._modified_edges}\n self.degree_weighted_matrices = {**self.degree_weighted_matrices, **self._weighted_modified_edges}\n self.in_degree = {**self.in_degree, **self._orig_in_degree}\n self.out_degree = {**self.out_degree, **self._orig_out_degree}\n\n # Reset the edge and degree cache\n self._modified_edges = None\n self._weighted_modified_edges = None\n self._orig_in_degree = dict()\n self._orig_out_degree = dict()", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def refresh_metadata(self):\n #self.node_index = None\n #self.edge_index = None\n #self._calc_edge_centers = False\n #self._calc_cell_centers = False\n #self._calc_vcenters = False\n self._node_to_edges = None\n self._node_to_cells = None", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def fix_graph(self,graph):\n graph_compleate_reachable = False\n while not graph_compleate_reachable:\n not_reachable_in ,not_reachable_out = self.not_reachable(graph)\n for n in not_reachable_in:\n graph.add_edge(self.random.randint(0,n-1),n)\n for n in not_reachable_out:\n graph.add_edge(n,self.random.randint(n+1, self.nodes-1))\n graph_compleate_reachable = len(not_reachable_in)==0 and len(not_reachable_out)==0\n return graph", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def make_complete_graph(num_nodes):\n complete_digraph = {}\n if num_nodes > 0 and type(num_nodes) == int:\n neighbors = set([idx for idx in range(num_nodes)])\n for idx in range(num_nodes):\n complete_digraph[idx] = neighbors.copy() #creates adjacency set\n complete_digraph[idx].remove(idx) # pop out self-loop \n return complete_digraph", "def _pre_setup(self):\n super()._pre_setup()\n self.create_connections_indices()", "def _pre_setup(self):\n super()._pre_setup()\n self.create_connections_indices()", "def _pre_setup(self):\n super()._pre_setup()\n self.create_connections_indices()", 
"def legalize_graph(gm: pippy.fx.GraphModule) -> pippy.fx.GraphModule:\n indeg = {node: 0 for node in gm.graph.nodes}\n new_graph = pippy.fx.Graph()\n # Track how many unfulfilled dependencies each node has\n for node in gm.graph.nodes:\n for user in node.users:\n indeg[user] += 1\n queue: collections.deque = collections.deque()\n # Add all nodes with no dependencies to the queue\n for node in gm.graph.nodes:\n if indeg[node] == 0:\n queue.append(node)\n env: Dict[pippy.fx.Node, pippy.fx.Node] = {}\n # Pop nodes from the queue, and add nodes that have had all their\n # dependencies fulfilled\n while len(queue) > 0:\n cur = queue.popleft()\n env[cur] = new_graph.node_copy(cur, lambda x: env[x])\n for user in cur.users:\n indeg[user] -= 1\n if indeg[user] == 0:\n queue.append(user)\n # If the new graph's size is not as large as the old one, then there must be\n # a cycle (i.e. some node's dependencies were not satisfied.)\n if len(new_graph.nodes) < len(gm.graph.nodes):\n raise RuntimeError(f\"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}\")\n gm.graph = new_graph\n return gm", "def clear(self):\n self.nodes = list()\n self.inputs = list()\n self.nodes += [self]", "def setup(self):\n switches, links = self.generateTopology()\n self.graph = KytosGraph()\n self.graph.clear()\n self.graph.update_nodes(switches)\n self.graph.update_links(links)\n self.graph.set_path_fun(nx.shortest_simple_paths)", "def _build_graph(self):\n pass", "def __init__(self, nodes=None, edges=None):\n self._nodes = []\n self.nodes = nodes\n self._edges = []\n self.edges = edges\n self._create_connections()\n self._sorted_nodes = None\n self._node_wip = []", "def clear(self):\n self.mismatch_error = None\n self.pt_outs = None\n self._onnx_graph = None\n self.upper_graph_info = None\n self.lower_graph_info = None", "def reset_visited(self):\n self.__visited = False", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def __init__(self, directed=True):\n self.nodes = set()\n self.edges = []\n self.directed = directed", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def processLoadedLinkNodes(self, isLoadingModel):\r\n \r\n if(isLoadingModel):\r\n #=====================================================\r\n # Hierarchical structure maintenance (See HierarchicalASGNode.py)\r\n #=====================================================\r\n for node in self.newLinkNodeQueue:\r\n # NOTE: node.in_connections_ could be > 1 in the metamodel since \r\n # AToM3 considers meta-model relations to have hierarchy active!\r\n # Of course I didn't intend this, and it has no meaning, so ignore!\r\n if(node.isHierarchicalLink() and len(node.in_connections_) == 1):\r\n for child in node.out_connections_:\r\n child._setHierParent(node.in_connections_[0])\r\n node.in_connections_[0]._addHierChildrenList(node.out_connections_) \r\n \r\n\r\n #=======================================================================\r\n # QOCA Constraints\r\n # Only do this if we are actually using QOCA in the first place...\r\n #=======================================================================\r\n if(isNotUsingQoca()): \r\n return\r\n \r\n for node in self.newLinkNodeQueue:\r\n # Apply any QOCA linear constraints\r\n # For graph grammars, graphObject_ may be none, so GGrule.py will also\r\n # trigger QOCA in its replaceSides() method\r\n if(hasattr(node, 'QOCA') and not node.__dict__.has_key('QOCA')):\r\n # Make sure 
that node has a method called 'QOCA', not an attribute\r\n node.QOCA(None)\r\n \r\n # Clean out the queue\r\n self.newLinkNodeQueue = []", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def __init__(self):\n self.adjList = {}", "def reset_nn(self): # Clear current network\n self.weights = np.zeros((p.num_rovers, self.n_weights))\n self.in_layer = np.zeros((p.num_rovers, self.n_inputs))\n self.hid_layer = np.zeros((p.num_rovers, self.n_nodes))\n self.out_layer = np.zeros((p.num_rovers, self.n_outputs))", "def _init_nodes(self, nodes):\n attributes = self.get_node_attributes()\n for node in nodes:\n if not self._is_node_added(node):\n self._nodes.append(self._get_node_as_dictionary(node, attributes))", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def abort(self):\n for node in self.dep_graph.nodes_iter():\n role = self.roles[node]\n role.new_rep = role.cur_rep\n role.new_hosts = list(role.cur_hosts)\n for edge in self.dep_graph.edges_iter():\n edge_data = self.dep_graph.get_edge_data(*edge)\n edge_data['new_weight'] = edge_data['cur_weight']", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in [activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def reset(self):\n self.visited = set()\n del self.targets[0]", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_graph(self):\n self._reset_iterator_memory()\n self._construct_graph_handler()\n assert self.graph_handler\n for rxn_id in self.graph_handler.get_valid_reaction_ids():\n rxn = db.Reaction(rxn_id, self._reactions)\n self.graph_handler.add_rxn(rxn)", "def topology_complete(self):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, 
len(self.sites)):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def __init__(self):\n self.data_graph = self._initialise_data()\n\n self.messages_sent = []\n self.messages_received = []\n\n self.answered_true = set()\n self.implied_true = set()\n\n self.current_subgraph = set()", "def clear(self):\n self._nodes = { }\n self._arcs = set()", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def __initialize_connection_strengths(G):\n G_prime = G.__deepcopy__() # construct a deepcopy of the graph\n # for every vertex in the graph, initialize the connection strength to zero\n for node in G_prime.get_nodeset(): node.add_attribute(StoerWagner.CONNECTION_STRENGTH_ATTRIBUTE, float(0))\n return G_prime # return the new graph", "def _initializeAdjacencyList(self):\n\n if self.comm.rank == 0:\n # First, create a dictionary of common edges shared by components\n edgeToFace = {}\n for elemID in self.bdfInfo.elements:\n elemInfo = self.bdfInfo.elements[elemID]\n elemConn = elemInfo.nodes\n compID = self.meshLoader.nastranToTACSCompIDDict[elemInfo.pid]\n nnodes = len(elemConn)\n if nnodes >= 2:\n for j in range(nnodes):\n nodeID1 = elemConn[j]\n nodeID2 = elemConn[(j + 1) % nnodes]\n\n if nodeID1 < nodeID2:\n key = (nodeID1, nodeID2)\n else:\n key = (nodeID2, nodeID1)\n\n if key not in edgeToFace:\n edgeToFace[key] = [compID]\n elif compID not in edgeToFace[key]:\n edgeToFace[key].append(compID)\n\n # Now we loop back over each element and each edge. 
By\n # using the edgeToFace dictionary, we can now determine\n # which components IDs (jComp) are connected to the\n # current component ID (iComp).\n self.adjacentComps = []\n\n for edgeKey in edgeToFace:\n if len(edgeToFace[edgeKey]) >= 2:\n for i, iComp in enumerate(edgeToFace[edgeKey][:-1]):\n for jComp in edgeToFace[edgeKey][i + 1 :]:\n if iComp < jComp:\n dvKey = (iComp, jComp)\n else:\n dvKey = (jComp, iComp)\n if dvKey not in self.adjacentComps:\n self.adjacentComps.append(dvKey)\n\n else:\n self.adjacentComps = None\n\n # Wait for root\n self.comm.barrier()", "def _update_dead_nodes(self) -> None:\n with self._dead_node_lock:\n self._dead_nodes = self._seen_nodes - self._running_nodes", "def cfdProcessNodeTopology(self):\r\n self.nodeElements = self.cfdInvertConnectivity(self.elementNodes)\r\n self.nodeFaces = self.cfdInvertConnectivity(self.faceNodes)", "def direct_network(self):\n #print list(self.get_subgraphs())\n graphs = [self._depth_first_directed(g) for g in self.get_subgraphs()]\n self._network = reduce(lambda a, b: nx.union(a, b), graphs)", "def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray], seeds: Union[np.ndarray, dict] = None) \\\n -> 'Propagation':\n adjacency = check_format(adjacency)\n n = adjacency.shape[0]\n index_seed, index_remain, labels_seed = self._instanciate_vars(adjacency, seeds)\n\n if self.node_order == 'random':\n np.random.shuffle(index_remain)\n elif self.node_order == 'decreasing':\n index = np.argsort(-adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n elif self.node_order == 'increasing':\n index = np.argsort(adjacency.T.dot(np.ones(n))).astype(np.int32)\n index_remain = index[index_remain]\n\n labels = -np.ones(n, dtype=np.int32)\n labels[index_seed] = labels_seed\n labels_remain = np.zeros_like(index_remain, dtype=np.int32)\n\n indptr = adjacency.indptr.astype(np.int32)\n indices = adjacency.indices.astype(np.int32)\n if self.weighted:\n data = adjacency.data.astype(np.float32)\n else:\n data = np.ones(n, dtype=np.float32)\n\n t = 0\n while t < self.n_iter and not np.array_equal(labels_remain, labels[index_remain]):\n t += 1\n labels_remain = labels[index_remain].copy()\n labels = np.asarray(vote_update(indptr, indices, data, labels, index_remain))\n\n membership = membership_matrix(labels)\n membership = normalize(adjacency.dot(membership))\n\n self.labels_ = labels\n self.membership_ = membership\n\n return self", "def reset(self):\n super(CheckMayaAbstract, self).reset()\n self.errorNodes = list()\n self._errorDict = {}", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. 
This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def make_complete_graph(num_nodes):\n xgraph = {} #Create a Blank Dict\n if num_nodes - 1 < 0: # checks to see if the num_nodes is less then 0 (negative number) if it is return empty graph (dict). Could probably combine the If statments for negative nodes and 1 node together\n return xgraph\n if num_nodes - 1 == 0: # If the number of nodes is 1 or returns a one node dict because there are no edges to compute\n xgraph[0] = set([]) # creates a dict that represents a single node graph as per the requirement\n return xgraph # the empty Graph\n else:\n for base_node in range(num_nodes): # This portion starts the build phase. for each node it will compute the theretical maximum amount of edges\n xlist = set([]) # defines an empty list. We first build a list for each node and the append to a dict. This list is erased with each iteration\n #print base_node # testing - REMOVE\n for edge_node in range(num_nodes):\n #print edge_node # testing - REMOVE\n if edge_node != base_node: #No Looping is allowed for this project. Therefor we check to insure the we are not counting a self node connection (edge_node NOT equal base_node)\n xlist.add(edge_node) # Populating list that will be added to dict\n\n xgraph[base_node] = xlist # Appending created list to the dict\n\n return xgraph # returning populated dict", "def initGraphs(self):\n \n self.graph = ConjunctiveGraph()\n # Create a separate graph for annotations\n self.annotationGraph = ConjunctiveGraph()\n \n self.log.debug('Adding namespaces to graphs')\n # Bind namespaces to graphs\n for namespace in self.namespaces:\n self.graph.namespace_manager.bind(namespace, self.namespaces[namespace])\n\n # Same for annotation graph\n for namespace in self.annotationNamespaces:\n self.annotationGraph.namespace_manager.bind(namespace, self.annotationNamespaces[namespace])\n \n # Add schema information\n self.log.debug('Adding some schema information (dimension and measure properties) ')\n self.addDataCellProperty()\n\n # Add dimensions \n self.graph.add((self.namespaces['tablink']['dimension'], RDF.type, self.namespaces['qb']['DimensionProperty']))\n \n #self.graph.add((self.namespaces['tablink']['label'], RDF.type, RDF['Property']))", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def __init__(self, nodes=[], edges=[], connections=[], directed=False, isNetwork=False):\n Node.count=0\n Edge.count=0\n self.nodes = [n for n in nodes]\n self.edges = [e for e in edges]\n self.connections = [(a, b) for (a, b) in connections]\n self.isDirected = directed\n self.isNetwork = isNetwork", "def _CreateAdjacencyListGraph(self):\n graph = dict()\n for nodes in self.nodes:\n graph[nodes[0]] = set()\n for edges in self.edges:\n graph[edges[0]].add(edges[1])\n return graph", "def reset(self):\n self._clusters = {}\n self._clusters_val = 
{}\n self._centroids = {}\n self.store()", "def build_graph(self):\n pass", "def clean_edges(self):", "def reset_weight_zero(self):\n self.node_embedding = np.random.uniform(low=-0.5, high=0.5, size=(self.vocab_size, self.layer1_size)).astype(\n np.float32)\n self.context_embedding = np.zeros((self.vocab_size, self.layer1_size), dtype=np.float32)\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, self.k), dtype=np.float32)\n log.info(\"reset communities data| k: {}\".format(self.k))", "def init_adjacency(self, A):\n A[A==0] = self.INFINITY", "def add_resets_edges(graph, start):\n for node in graph.nodes:\n neighbors = list(graph[node])\n if neighbors == [node]:\n graph.add_edge(node, start, label=\"RESET / \")", "def transition(self):\n for node in self.net.nodes():\n if node not in self.evidence:\n self.update_node(node)", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def generate_full_adj(self):\n edges = np.zeros(shape=(self.n_balls, self.n_balls))\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n for n in range(self.nn[l]):\n edges[row_idx, col_idx:col_idx + self.nc[l]] = 1\n # Increase counters after filling connections for a parent node\n col_idx += self.nc[l]\n row_idx += 1\n return edges", "def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 3)\n self.small_tree.add_edge(4, 3)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(0, 1) # deg(0) = 1\n\n self.deterministic_graph.add_edge(1, 2) # deg(1) = 2\n\n self.deterministic_graph.add_edge(2, 3)\n self.deterministic_graph.add_edge(2, 4) # deg(2) = 3\n\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(3, 6) # deg(3) = 4\n\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(4, 6)\n self.deterministic_graph.add_edge(4, 7) # deg(4) = 5\n\n self.deterministic_graph.add_edge(5, 6)\n self.deterministic_graph.add_edge(5, 7)\n self.deterministic_graph.add_edge(5, 8)\n 
self.deterministic_graph.add_edge(5, 9) # deg(5) = 6\n\n self.deterministic_graph.add_edge(6, 7)\n self.deterministic_graph.add_edge(6, 8)\n self.deterministic_graph.add_edge(6, 9) # deg(6) = 6\n\n self.deterministic_graph.add_edge(7, 8)\n self.deterministic_graph.add_edge(7, 9) # deg(7) = 5\n\n self.deterministic_graph.add_edge(8, 9) # deg(8) = 4", "def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))", "def update(self):\r\n self.g = self.create_graph()", "def clear_nastran(self):\n self.eid_map = {}\n self.nid_map = {}\n self.eid_to_nid_map = {}\n self.element_ids = None\n self.node_ids = None" ]
[ "0.65324295", "0.63045096", "0.6301467", "0.6183333", "0.6102479", "0.6050887", "0.6023047", "0.5987564", "0.5974988", "0.58748007", "0.5874257", "0.58559334", "0.5806248", "0.5778933", "0.5771964", "0.5762822", "0.57474977", "0.57438475", "0.57006", "0.5695009", "0.56947756", "0.5658297", "0.5654165", "0.56452036", "0.56430835", "0.5635908", "0.56318253", "0.56318253", "0.5621224", "0.56173503", "0.5595307", "0.55855775", "0.55817443", "0.5577678", "0.557441", "0.554981", "0.5540656", "0.5532914", "0.5531942", "0.5516941", "0.55155355", "0.5508153", "0.5497178", "0.5497178", "0.5497178", "0.54649365", "0.546345", "0.54585266", "0.54529124", "0.5439693", "0.5429664", "0.5423405", "0.5422416", "0.54088396", "0.54016113", "0.5399264", "0.5392605", "0.5391486", "0.5367117", "0.5363376", "0.5363359", "0.5342555", "0.53364915", "0.53265774", "0.5322334", "0.53220356", "0.5321241", "0.53210264", "0.5310078", "0.5301045", "0.5292643", "0.52767885", "0.52759117", "0.5266936", "0.5263736", "0.5247962", "0.5243319", "0.5237699", "0.52289337", "0.52289337", "0.5223753", "0.52194643", "0.521289", "0.52058244", "0.5203413", "0.5202457", "0.51980627", "0.5195228", "0.5193464", "0.5192745", "0.51904464", "0.5189851", "0.5189851", "0.5188068", "0.5186776", "0.5186402", "0.51830775", "0.5180551", "0.51776546", "0.51723" ]
0.7615647
0