viz_tests.py 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169
  1. # Licensed to the Apache Software Foundation (ASF) under one
  2. # or more contributor license agreements. See the NOTICE file
  3. # distributed with this work for additional information
  4. # regarding copyright ownership. The ASF licenses this file
  5. # to you under the Apache License, Version 2.0 (the
  6. # "License"); you may not use this file except in compliance
  7. # with the License. You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing,
  12. # software distributed under the License is distributed on an
  13. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14. # KIND, either express or implied. See the License for the
  15. # specific language governing permissions and limitations
  16. # under the License.
  17. # isort:skip_file
  18. import uuid
  19. from datetime import datetime
  20. import logging
  21. from math import nan
  22. from unittest.mock import Mock, patch
  23. import numpy as np
  24. import pandas as pd
  25. import tests.test_app
  26. import superset.viz as viz
  27. from superset import app
  28. from superset.constants import NULL_STRING
  29. from superset.exceptions import SpatialException
  30. from superset.utils.core import DTTM_ALIAS
  31. from .base_tests import SupersetTestCase
  32. from .utils import load_fixture
  33. logger = logging.getLogger(__name__)
  34. class BaseVizTestCase(SupersetTestCase):
  35. def test_constructor_exception_no_datasource(self):
  36. form_data = {}
  37. datasource = None
  38. with self.assertRaises(Exception):
  39. viz.BaseViz(datasource, form_data)
  40. def test_process_metrics(self):
  41. # test TableViz metrics in correct order
  42. form_data = {
  43. "url_params": {},
  44. "row_limit": 500,
  45. "metric": "sum__SP_POP_TOTL",
  46. "entity": "country_code",
  47. "secondary_metric": "sum__SP_POP_TOTL",
  48. "granularity_sqla": "year",
  49. "page_length": 0,
  50. "all_columns": [],
  51. "viz_type": "table",
  52. "since": "2014-01-01",
  53. "until": "2014-01-02",
  54. "metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
  55. "country_fieldtype": "cca3",
  56. "percent_metrics": ["count"],
  57. "slice_id": 74,
  58. "time_grain_sqla": None,
  59. "order_by_cols": [],
  60. "groupby": ["country_name"],
  61. "compare_lag": "10",
  62. "limit": "25",
  63. "datasource": "2__table",
  64. "table_timestamp_format": "%Y-%m-%d %H:%M:%S",
  65. "markup_type": "markdown",
  66. "where": "",
  67. "compare_suffix": "o10Y",
  68. }
  69. datasource = Mock()
  70. datasource.type = "table"
  71. test_viz = viz.BaseViz(datasource, form_data)
  72. expect_metric_labels = [
  73. u"sum__SP_POP_TOTL",
  74. u"SUM(SE_PRM_NENR_MA)",
  75. u"SUM(SP_URB_TOTL)",
  76. u"count",
  77. ]
  78. self.assertEqual(test_viz.metric_labels, expect_metric_labels)
  79. self.assertEqual(test_viz.all_metrics, expect_metric_labels)
  80. def test_get_df_returns_empty_df(self):
  81. form_data = {"dummy": 123}
  82. query_obj = {"granularity": "day"}
  83. datasource = self.get_datasource_mock()
  84. test_viz = viz.BaseViz(datasource, form_data)
  85. result = test_viz.get_df(query_obj)
  86. self.assertEqual(type(result), pd.DataFrame)
  87. self.assertTrue(result.empty)
  88. def test_get_df_handles_dttm_col(self):
  89. form_data = {"dummy": 123}
  90. query_obj = {"granularity": "day"}
  91. results = Mock()
  92. results.query = Mock()
  93. results.status = Mock()
  94. results.error_message = Mock()
  95. datasource = Mock()
  96. datasource.type = "table"
  97. datasource.query = Mock(return_value=results)
  98. mock_dttm_col = Mock()
  99. datasource.get_column = Mock(return_value=mock_dttm_col)
  100. test_viz = viz.BaseViz(datasource, form_data)
  101. test_viz.df_metrics_to_num = Mock()
  102. test_viz.get_fillna_for_columns = Mock(return_value=0)
  103. results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
  104. datasource.offset = 0
  105. mock_dttm_col = Mock()
  106. datasource.get_column = Mock(return_value=mock_dttm_col)
  107. mock_dttm_col.python_date_format = "epoch_ms"
  108. result = test_viz.get_df(query_obj)
  109. import logging
  110. logger.info(result)
  111. pd.testing.assert_series_equal(
  112. result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
  113. )
  114. mock_dttm_col.python_date_format = None
  115. result = test_viz.get_df(query_obj)
  116. pd.testing.assert_series_equal(
  117. result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
  118. )
  119. datasource.offset = 1
  120. result = test_viz.get_df(query_obj)
  121. pd.testing.assert_series_equal(
  122. result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
  123. )
  124. datasource.offset = 0
  125. results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
  126. mock_dttm_col.python_date_format = "%Y-%m-%d"
  127. result = test_viz.get_df(query_obj)
  128. pd.testing.assert_series_equal(
  129. result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
  130. )
  131. def test_cache_timeout(self):
  132. datasource = self.get_datasource_mock()
  133. datasource.cache_timeout = 0
  134. test_viz = viz.BaseViz(datasource, form_data={})
  135. self.assertEqual(0, test_viz.cache_timeout)
  136. datasource.cache_timeout = 156
  137. test_viz = viz.BaseViz(datasource, form_data={})
  138. self.assertEqual(156, test_viz.cache_timeout)
  139. datasource.cache_timeout = None
  140. datasource.database.cache_timeout = 0
  141. self.assertEqual(0, test_viz.cache_timeout)
  142. datasource.database.cache_timeout = 1666
  143. self.assertEqual(1666, test_viz.cache_timeout)
  144. datasource.database.cache_timeout = None
  145. test_viz = viz.BaseViz(datasource, form_data={})
  146. self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
    """Tests for TableViz: percent-metric computation, adhoc filter parsing,
    and query-object construction."""

    def test_get_data_applies_percentage(self):
        """get_data appends a %-prefixed column for each percent metric,
        computed as each row's share of the column total."""
        form_data = {
            "groupby": ["groupA", "groupB"],
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "count",
                "avg__C",
            ],
            "percent_metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                },
                "avg__B",
            ],
        }
        datasource = self.get_datasource_mock()
        df = pd.DataFrame(
            {
                "SUM(value1)": [15, 20, 25, 40],
                "avg__B": [10, 20, 5, 15],
                "avg__C": [11, 22, 33, 44],
                "count": [6, 7, 8, 9],
                "groupA": ["A", "B", "C", "C"],
                "groupB": ["x", "x", "y", "z"],
            }
        )
        test_viz = viz.TableViz(datasource, form_data)
        data = test_viz.get_data(df)
        # Check method correctly transforms data and computes percents
        self.assertEqual(
            [
                "groupA",
                "groupB",
                "SUM(value1)",
                "count",
                "avg__C",
                "%SUM(value1)",
                "%avg__B",
            ],
            list(data["columns"]),
        )
        expected = [
            {
                "groupA": "A",
                "groupB": "x",
                "SUM(value1)": 15,
                "count": 6,
                "avg__C": 11,
                "%SUM(value1)": 0.15,
                "%avg__B": 0.2,
            },
            {
                "groupA": "B",
                "groupB": "x",
                "SUM(value1)": 20,
                "count": 7,
                "avg__C": 22,
                "%SUM(value1)": 0.2,
                "%avg__B": 0.4,
            },
            {
                "groupA": "C",
                "groupB": "y",
                "SUM(value1)": 25,
                "count": 8,
                "avg__C": 33,
                "%SUM(value1)": 0.25,
                "%avg__B": 0.1,
            },
            {
                "groupA": "C",
                "groupB": "z",
                "SUM(value1)": 40,
                "count": 9,
                "avg__C": 44,
                "%SUM(value1)": 0.4,
                "%avg__B": 0.3,
            },
        ]
        self.assertEqual(expected, data["records"])

    def test_parse_adhoc_filters(self):
        """Adhoc filters are routed by clause/type: SIMPLE WHERE -> filter,
        SIMPLE HAVING -> having_druid, SQL clauses -> extras where/having."""
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SIMPLE",
                    "clause": "HAVING",
                    "subject": "SUM(value1)",
                    "operator": "<",
                    "comparator": "10",
                },
                {
                    "expressionType": "SQL",
                    "clause": "HAVING",
                    "sqlExpression": "SUM(value1) > 5",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
        }
        datasource = self.get_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        self.assertEqual(
            [{"op": "<", "val": "10", "col": "SUM(value1)"}],
            query_obj["extras"]["having_druid"],
        )
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])

    def test_adhoc_filters_overwrite_legacy_filters(self):
        """When adhoc_filters are present, the legacy `having` form field
        is ignored in the resulting query object."""
        form_data = {
            "metrics": [
                {
                    "expressionType": "SIMPLE",
                    "aggregate": "SUM",
                    "label": "SUM(value1)",
                    "column": {"column_name": "value1", "type": "DOUBLE"},
                }
            ],
            "adhoc_filters": [
                {
                    "expressionType": "SIMPLE",
                    "clause": "WHERE",
                    "subject": "value2",
                    "operator": ">",
                    "comparator": "100",
                },
                {
                    "expressionType": "SQL",
                    "clause": "WHERE",
                    "sqlExpression": "value3 in ('North America')",
                },
            ],
            "having": "SUM(value1) > 5",
        }
        datasource = self.get_datasource_mock()
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(
            [{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
        )
        self.assertEqual([], query_obj["extras"]["having_druid"])
        self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
        # legacy `having` must be dropped in favor of the adhoc filters
        self.assertEqual("", query_obj["extras"]["having"])

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_merges_percent_metrics(self, super_query_obj):
        """Percent metrics are appended to metrics without duplicates."""
        datasource = self.get_datasource_mock()
        form_data = {
            "percent_metrics": ["sum__A", "avg__B", "max__Y"],
            "metrics": ["sum__A", "count", "avg__C"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        f_query_obj = {"metrics": form_data["metrics"]}
        super_query_obj.return_value = f_query_obj
        query_obj = test_viz.query_obj()
        self.assertEqual(
            ["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
        )

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_throws_columns_and_metrics(self, super_query_obj):
        """Mixing all_columns with metrics or groupby must raise."""
        datasource = self.get_datasource_mock()
        form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
        super_query_obj.return_value = {}
        test_viz = viz.TableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()
        del form_data["metrics"]
        form_data["groupby"] = ["B", "C"]
        test_viz = viz.TableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.query_obj()

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_merges_all_columns(self, super_query_obj):
        """all_columns replaces columns/groupby; order_by_cols is parsed
        (JSON strings) into the orderby list."""
        datasource = self.get_datasource_mock()
        form_data = {
            "all_columns": ["colA", "colB", "colC"],
            "order_by_cols": ['["colA", "colB"]', '["colC"]'],
        }
        super_query_obj.return_value = {
            "columns": ["colD", "colC"],
            "groupby": ["colA", "colB"],
        }
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(form_data["all_columns"], query_obj["columns"])
        self.assertEqual([], query_obj["groupby"])
        self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])

    @patch("superset.viz.BaseViz.query_obj")
    def test_query_obj_uses_sortby(self, super_query_obj):
        """timeseries_limit_metric is added to metrics and used as the sort
        key, ascending when order_desc is False."""
        datasource = self.get_datasource_mock()
        form_data = {"timeseries_limit_metric": "__time__", "order_desc": False}
        super_query_obj.return_value = {"metrics": ["colA", "colB"]}
        test_viz = viz.TableViz(datasource, form_data)
        query_obj = test_viz.query_obj()
        self.assertEqual(["colA", "colB", "__time__"], query_obj["metrics"])
        self.assertEqual([("__time__", True)], query_obj["orderby"])

    def test_should_be_timeseries_raises_when_no_granularity(self):
        """include_time without a granularity must raise."""
        datasource = self.get_datasource_mock()
        form_data = {"include_time": True}
        test_viz = viz.TableViz(datasource, form_data)
        with self.assertRaises(Exception):
            test_viz.should_be_timeseries()
  378. class DistBarVizTestCase(SupersetTestCase):
  379. def test_groupby_nulls(self):
  380. form_data = {
  381. "metrics": ["votes"],
  382. "adhoc_filters": [],
  383. "groupby": ["toppings"],
  384. "columns": [],
  385. }
  386. datasource = self.get_datasource_mock()
  387. df = pd.DataFrame(
  388. {
  389. "toppings": ["cheese", "pepperoni", "anchovies", None],
  390. "votes": [3, 5, 1, 2],
  391. }
  392. )
  393. test_viz = viz.DistributionBarViz(datasource, form_data)
  394. data = test_viz.get_data(df)[0]
  395. self.assertEqual("votes", data["key"])
  396. expected_values = [
  397. {"x": "pepperoni", "y": 5},
  398. {"x": "cheese", "y": 3},
  399. {"x": NULL_STRING, "y": 2},
  400. {"x": "anchovies", "y": 1},
  401. ]
  402. self.assertEqual(expected_values, data["values"])
  403. def test_groupby_nans(self):
  404. form_data = {
  405. "metrics": ["count"],
  406. "adhoc_filters": [],
  407. "groupby": ["beds"],
  408. "columns": [],
  409. }
  410. datasource = self.get_datasource_mock()
  411. df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
  412. test_viz = viz.DistributionBarViz(datasource, form_data)
  413. data = test_viz.get_data(df)[0]
  414. self.assertEqual("count", data["key"])
  415. expected_values = [
  416. {"x": "1.0", "y": 42},
  417. {"x": "0.0", "y": 30},
  418. {"x": "2.0", "y": 29},
  419. {"x": NULL_STRING, "y": 3},
  420. ]
  421. self.assertEqual(expected_values, data["values"])
  422. def test_column_nulls(self):
  423. form_data = {
  424. "metrics": ["votes"],
  425. "adhoc_filters": [],
  426. "groupby": ["toppings"],
  427. "columns": ["role"],
  428. }
  429. datasource = self.get_datasource_mock()
  430. df = pd.DataFrame(
  431. {
  432. "toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
  433. "role": ["engineer", "engineer", None, None],
  434. "votes": [3, 5, 1, 2],
  435. }
  436. )
  437. test_viz = viz.DistributionBarViz(datasource, form_data)
  438. data = test_viz.get_data(df)
  439. expected = [
  440. {
  441. "key": NULL_STRING,
  442. "values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
  443. },
  444. {
  445. "key": "engineer",
  446. "values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
  447. },
  448. ]
  449. self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
    """Tests for the paired t-test viz data transformation."""

    def test_get_data_transforms_dataframe(self):
        """get_data produces, per metric, one series per group tuple with
        (time, value) points taken from the raw rows."""
        form_data = {
            "groupby": ["groupA", "groupB", "groupC"],
            "metrics": ["metric1", "metric2", "metric3"],
        }
        datasource = self.get_datasource_mock()
        # Test data
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
        raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
        raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
        raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
        raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
        df = pd.DataFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.get_data(df)
        # Check method correctly transforms data
        expected = {
            "metric1": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 4},
                        {"x": 200, "y": 5},
                        {"x": 300, "y": 6},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 7},
                        {"x": 200, "y": 8},
                        {"x": 300, "y": 9},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric2": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 40},
                        {"x": 200, "y": 50},
                        {"x": 300, "y": 60},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 70},
                        {"x": 200, "y": 80},
                        {"x": 300, "y": 90},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
            "metric3": [
                {
                    "values": [
                        {"x": 100, "y": 100},
                        {"x": 200, "y": 200},
                        {"x": 300, "y": 300},
                    ],
                    "group": ("a1", "a2", "a3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 400},
                        {"x": 200, "y": 500},
                        {"x": 300, "y": 600},
                    ],
                    "group": ("b1", "b2", "b3"),
                },
                {
                    "values": [
                        {"x": 100, "y": 700},
                        {"x": 200, "y": 800},
                        {"x": 300, "y": 900},
                    ],
                    "group": ("c1", "c2", "c3"),
                },
            ],
        }
        self.assertEqual(data, expected)

    def test_get_data_empty_null_keys(self):
        """Empty-string and None metric names map to the "N/A" and "NULL"
        keys, each with a single "All" group."""
        form_data = {"groupby": [], "metrics": ["", None]}
        datasource = self.get_datasource_mock()
        # Test data
        raw = {}
        raw[DTTM_ALIAS] = [100, 200, 300]
        raw[""] = [1, 2, 3]
        raw[None] = [10, 20, 30]
        df = pd.DataFrame(raw)
        pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
        data = pairedTTestViz.get_data(df)
        # Check method correctly transforms data
        expected = {
            "N/A": [
                {
                    "values": [
                        {"x": 100, "y": 1},
                        {"x": 200, "y": 2},
                        {"x": 300, "y": 3},
                    ],
                    "group": "All",
                }
            ],
            "NULL": [
                {
                    "values": [
                        {"x": 100, "y": 10},
                        {"x": 200, "y": 20},
                        {"x": 300, "y": 30},
                    ],
                    "group": "All",
                }
            ],
        }
        self.assertEqual(data, expected)
  586. class PartitionVizTestCase(SupersetTestCase):
  587. @patch("superset.viz.BaseViz.query_obj")
  588. def test_query_obj_time_series_option(self, super_query_obj):
  589. datasource = self.get_datasource_mock()
  590. form_data = {}
  591. test_viz = viz.PartitionViz(datasource, form_data)
  592. super_query_obj.return_value = {}
  593. query_obj = test_viz.query_obj()
  594. self.assertFalse(query_obj["is_timeseries"])
  595. test_viz.form_data["time_series_option"] = "agg_sum"
  596. query_obj = test_viz.query_obj()
  597. self.assertTrue(query_obj["is_timeseries"])
  598. def test_levels_for_computes_levels(self):
  599. raw = {}
  600. raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
  601. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  602. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  603. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  604. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  605. raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
  606. raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
  607. df = pd.DataFrame(raw)
  608. groups = ["groupA", "groupB", "groupC"]
  609. time_op = "agg_sum"
  610. test_viz = viz.PartitionViz(Mock(), {})
  611. levels = test_viz.levels_for(time_op, groups, df)
  612. self.assertEqual(4, len(levels))
  613. expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
  614. self.assertEqual(expected, levels[0].to_dict())
  615. expected = {
  616. DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
  617. "metric1": {"a1": 6, "b1": 15, "c1": 24},
  618. "metric2": {"a1": 60, "b1": 150, "c1": 240},
  619. "metric3": {"a1": 600, "b1": 1500, "c1": 2400},
  620. }
  621. self.assertEqual(expected, levels[1].to_dict())
  622. self.assertEqual(["groupA", "groupB"], levels[2].index.names)
  623. self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
  624. time_op = "agg_mean"
  625. levels = test_viz.levels_for(time_op, groups, df)
  626. self.assertEqual(4, len(levels))
  627. expected = {
  628. DTTM_ALIAS: 200.0,
  629. "metric1": 5.0,
  630. "metric2": 50.0,
  631. "metric3": 500.0,
  632. }
  633. self.assertEqual(expected, levels[0].to_dict())
  634. expected = {
  635. DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
  636. "metric1": {"a1": 2, "b1": 5, "c1": 8},
  637. "metric2": {"a1": 20, "b1": 50, "c1": 80},
  638. "metric3": {"a1": 200, "b1": 500, "c1": 800},
  639. }
  640. self.assertEqual(expected, levels[1].to_dict())
  641. self.assertEqual(["groupA", "groupB"], levels[2].index.names)
  642. self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
  643. def test_levels_for_diff_computes_difference(self):
  644. raw = {}
  645. raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
  646. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  647. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  648. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  649. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  650. raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
  651. raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
  652. df = pd.DataFrame(raw)
  653. groups = ["groupA", "groupB", "groupC"]
  654. test_viz = viz.PartitionViz(Mock(), {})
  655. time_op = "point_diff"
  656. levels = test_viz.levels_for_diff(time_op, groups, df)
  657. expected = {"metric1": 6, "metric2": 60, "metric3": 600}
  658. self.assertEqual(expected, levels[0].to_dict())
  659. expected = {
  660. "metric1": {"a1": 2, "b1": 2, "c1": 2},
  661. "metric2": {"a1": 20, "b1": 20, "c1": 20},
  662. "metric3": {"a1": 200, "b1": 200, "c1": 200},
  663. }
  664. self.assertEqual(expected, levels[1].to_dict())
  665. self.assertEqual(4, len(levels))
  666. self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
  667. def test_levels_for_time_calls_process_data_and_drops_cols(self):
  668. raw = {}
  669. raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
  670. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  671. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  672. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  673. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  674. raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
  675. raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
  676. df = pd.DataFrame(raw)
  677. groups = ["groupA", "groupB", "groupC"]
  678. test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
  679. def return_args(df_drop, aggregate):
  680. return df_drop
  681. test_viz.process_data = Mock(side_effect=return_args)
  682. levels = test_viz.levels_for_time(groups, df)
  683. self.assertEqual(4, len(levels))
  684. cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
  685. self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
  686. cols += ["groupA"]
  687. self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
  688. cols += ["groupB"]
  689. self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
  690. cols += ["groupC"]
  691. self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
  692. self.assertEqual(4, len(test_viz.process_data.mock_calls))
  693. def test_nest_values_returns_hierarchy(self):
  694. raw = {}
  695. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  696. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  697. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  698. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  699. raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
  700. raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
  701. df = pd.DataFrame(raw)
  702. test_viz = viz.PartitionViz(Mock(), {})
  703. groups = ["groupA", "groupB", "groupC"]
  704. levels = test_viz.levels_for("agg_sum", groups, df)
  705. nest = test_viz.nest_values(levels)
  706. self.assertEqual(3, len(nest))
  707. for i in range(0, 3):
  708. self.assertEqual("metric" + str(i + 1), nest[i]["name"])
  709. self.assertEqual(3, len(nest[0]["children"]))
  710. self.assertEqual(1, len(nest[0]["children"][0]["children"]))
  711. self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
  712. def test_nest_procs_returns_hierarchy(self):
  713. raw = {}
  714. raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
  715. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  716. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  717. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  718. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  719. raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
  720. raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
  721. df = pd.DataFrame(raw)
  722. test_viz = viz.PartitionViz(Mock(), {})
  723. groups = ["groupA", "groupB", "groupC"]
  724. metrics = ["metric1", "metric2", "metric3"]
  725. procs = {}
  726. for i in range(0, 4):
  727. df_drop = df.drop(groups[i:], 1)
  728. pivot = df_drop.pivot_table(
  729. index=DTTM_ALIAS, columns=groups[:i], values=metrics
  730. )
  731. procs[i] = pivot
  732. nest = test_viz.nest_procs(procs)
  733. self.assertEqual(3, len(nest))
  734. for i in range(0, 3):
  735. self.assertEqual("metric" + str(i + 1), nest[i]["name"])
  736. self.assertEqual(None, nest[i].get("val"))
  737. self.assertEqual(3, len(nest[0]["children"]))
  738. self.assertEqual(3, len(nest[0]["children"][0]["children"]))
  739. self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
  740. self.assertEqual(
  741. 1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
  742. )
  743. def test_get_data_calls_correct_method(self):
  744. test_viz = viz.PartitionViz(Mock(), {})
  745. df = Mock()
  746. with self.assertRaises(ValueError):
  747. test_viz.get_data(df)
  748. test_viz.levels_for = Mock(return_value=1)
  749. test_viz.nest_values = Mock(return_value=1)
  750. test_viz.form_data["groupby"] = ["groups"]
  751. test_viz.form_data["time_series_option"] = "not_time"
  752. test_viz.get_data(df)
  753. self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
  754. test_viz.form_data["time_series_option"] = "agg_sum"
  755. test_viz.get_data(df)
  756. self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
  757. test_viz.form_data["time_series_option"] = "agg_mean"
  758. test_viz.get_data(df)
  759. self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
  760. test_viz.form_data["time_series_option"] = "point_diff"
  761. test_viz.levels_for_diff = Mock(return_value=1)
  762. test_viz.get_data(df)
  763. self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
  764. test_viz.form_data["time_series_option"] = "point_percent"
  765. test_viz.get_data(df)
  766. self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
  767. test_viz.form_data["time_series_option"] = "point_factor"
  768. test_viz.get_data(df)
  769. self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
  770. test_viz.levels_for_time = Mock(return_value=1)
  771. test_viz.nest_procs = Mock(return_value=1)
  772. test_viz.form_data["time_series_option"] = "adv_anal"
  773. test_viz.get_data(df)
  774. self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
  775. self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
  776. test_viz.form_data["time_series_option"] = "time_series"
  777. test_viz.get_data(df)
  778. self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
  779. self.assertEqual(7, len(test_viz.nest_values.mock_calls))
  780. class RoseVisTestCase(SupersetTestCase):
  781. def test_rose_vis_get_data(self):
  782. raw = {}
  783. t1 = pd.Timestamp("2000")
  784. t2 = pd.Timestamp("2002")
  785. t3 = pd.Timestamp("2004")
  786. raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
  787. raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
  788. raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
  789. raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
  790. raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  791. df = pd.DataFrame(raw)
  792. fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
  793. test_viz = viz.RoseViz(Mock(), fd)
  794. test_viz.metrics = fd["metrics"]
  795. res = test_viz.get_data(df)
  796. expected = {
  797. 946684800000000000: [
  798. {"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
  799. {"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
  800. {"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
  801. ],
  802. 1009843200000000000: [
  803. {"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
  804. {"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
  805. {"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
  806. ],
  807. 1072915200000000000: [
  808. {"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
  809. {"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
  810. {"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
  811. ],
  812. }
  813. self.assertEqual(expected, res)
  814. class TimeSeriesTableVizTestCase(SupersetTestCase):
  815. def test_get_data_metrics(self):
  816. form_data = {"metrics": ["sum__A", "count"], "groupby": []}
  817. datasource = self.get_datasource_mock()
  818. raw = {}
  819. t1 = pd.Timestamp("2000")
  820. t2 = pd.Timestamp("2002")
  821. raw[DTTM_ALIAS] = [t1, t2]
  822. raw["sum__A"] = [15, 20]
  823. raw["count"] = [6, 7]
  824. df = pd.DataFrame(raw)
  825. test_viz = viz.TimeTableViz(datasource, form_data)
  826. data = test_viz.get_data(df)
  827. # Check method correctly transforms data
  828. self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
  829. time_format = "%Y-%m-%d %H:%M:%S"
  830. expected = {
  831. t1.strftime(time_format): {"sum__A": 15, "count": 6},
  832. t2.strftime(time_format): {"sum__A": 20, "count": 7},
  833. }
  834. self.assertEqual(expected, data["records"])
  835. def test_get_data_group_by(self):
  836. form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
  837. datasource = self.get_datasource_mock()
  838. raw = {}
  839. t1 = pd.Timestamp("2000")
  840. t2 = pd.Timestamp("2002")
  841. raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
  842. raw["sum__A"] = [15, 20, 25, 30, 35, 40]
  843. raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
  844. df = pd.DataFrame(raw)
  845. test_viz = viz.TimeTableViz(datasource, form_data)
  846. data = test_viz.get_data(df)
  847. # Check method correctly transforms data
  848. self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
  849. time_format = "%Y-%m-%d %H:%M:%S"
  850. expected = {
  851. t1.strftime(time_format): {"a1": 15, "a2": 20, "a3": 25},
  852. t2.strftime(time_format): {"a1": 30, "a2": 35, "a3": 40},
  853. }
  854. self.assertEqual(expected, data["records"])
  855. @patch("superset.viz.BaseViz.query_obj")
  856. def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
  857. datasource = self.get_datasource_mock()
  858. form_data = {"groupby": ["a"]}
  859. super_query_obj.return_value = {}
  860. test_viz = viz.TimeTableViz(datasource, form_data)
  861. with self.assertRaises(Exception):
  862. test_viz.query_obj()
  863. form_data["metrics"] = ["x", "y"]
  864. test_viz = viz.TimeTableViz(datasource, form_data)
  865. with self.assertRaises(Exception):
  866. test_viz.query_obj()
  867. class BaseDeckGLVizTestCase(SupersetTestCase):
  868. def test_get_metrics(self):
  869. form_data = load_fixture("deck_path_form_data.json")
  870. datasource = self.get_datasource_mock()
  871. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  872. result = test_viz_deckgl.get_metrics()
  873. assert result == [form_data.get("size")]
  874. form_data = {}
  875. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  876. result = test_viz_deckgl.get_metrics()
  877. assert result == []
  878. def test_scatterviz_get_metrics(self):
  879. form_data = load_fixture("deck_path_form_data.json")
  880. datasource = self.get_datasource_mock()
  881. form_data = {}
  882. test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
  883. test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
  884. result = test_viz_deckgl.get_metrics()
  885. assert result == ["int"]
  886. form_data = {}
  887. test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
  888. test_viz_deckgl.point_radius_fixed = {}
  889. result = test_viz_deckgl.get_metrics()
  890. assert result is None
  891. def test_get_js_columns(self):
  892. form_data = load_fixture("deck_path_form_data.json")
  893. datasource = self.get_datasource_mock()
  894. mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
  895. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  896. result = test_viz_deckgl.get_js_columns(mock_d)
  897. assert result == {"color": None}
  898. def test_get_properties(self):
  899. mock_d = {}
  900. form_data = load_fixture("deck_path_form_data.json")
  901. datasource = self.get_datasource_mock()
  902. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  903. with self.assertRaises(NotImplementedError) as context:
  904. test_viz_deckgl.get_properties(mock_d)
  905. self.assertTrue("" in str(context.exception))
  906. def test_process_spatial_query_obj(self):
  907. form_data = load_fixture("deck_path_form_data.json")
  908. datasource = self.get_datasource_mock()
  909. mock_key = "spatial_key"
  910. mock_gb = []
  911. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  912. with self.assertRaises(ValueError) as context:
  913. test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
  914. self.assertTrue("Bad spatial key" in str(context.exception))
  915. test_form_data = {
  916. "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
  917. "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
  918. "geohash_key": {"type": "geohash", "geohashCol": "geo"},
  919. }
  920. datasource = self.get_datasource_mock()
  921. expected_results = {
  922. "latlong_key": ["lon", "lat"],
  923. "delimited_key": ["lonlat"],
  924. "geohash_key": ["geo"],
  925. }
  926. for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
  927. mock_gb = []
  928. test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
  929. test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
  930. assert expected_results.get(mock_key) == mock_gb
  931. def test_geojson_query_obj(self):
  932. form_data = load_fixture("deck_geojson_form_data.json")
  933. datasource = self.get_datasource_mock()
  934. test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
  935. results = test_viz_deckgl.query_obj()
  936. assert results["metrics"] == []
  937. assert results["groupby"] == []
  938. assert results["columns"] == ["test_col"]
  939. def test_parse_coordinates(self):
  940. form_data = load_fixture("deck_path_form_data.json")
  941. datasource = self.get_datasource_mock()
  942. viz_instance = viz.BaseDeckGLViz(datasource, form_data)
  943. coord = viz_instance.parse_coordinates("1.23, 3.21")
  944. self.assertEqual(coord, (1.23, 3.21))
  945. coord = viz_instance.parse_coordinates("1.23 3.21")
  946. self.assertEqual(coord, (1.23, 3.21))
  947. self.assertEqual(viz_instance.parse_coordinates(None), None)
  948. self.assertEqual(viz_instance.parse_coordinates(""), None)
  949. def test_parse_coordinates_raises(self):
  950. form_data = load_fixture("deck_path_form_data.json")
  951. datasource = self.get_datasource_mock()
  952. test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
  953. with self.assertRaises(SpatialException):
  954. test_viz_deckgl.parse_coordinates("NULL")
  955. with self.assertRaises(SpatialException):
  956. test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")
  957. @patch("superset.utils.core.uuid.uuid4")
  958. def test_filter_nulls(self, mock_uuid4):
  959. mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
  960. test_form_data = {
  961. "latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
  962. "delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
  963. "geohash_key": {"type": "geohash", "geohashCol": "geo"},
  964. }
  965. datasource = self.get_datasource_mock()
  966. expected_results = {
  967. "latlong_key": [
  968. {
  969. "clause": "WHERE",
  970. "expressionType": "SIMPLE",
  971. "filterOptionName": "12345678-1234-5678-1234-567812345678",
  972. "comparator": "",
  973. "operator": "IS NOT NULL",
  974. "subject": "lat",
  975. },
  976. {
  977. "clause": "WHERE",
  978. "expressionType": "SIMPLE",
  979. "filterOptionName": "12345678-1234-5678-1234-567812345678",
  980. "comparator": "",
  981. "operator": "IS NOT NULL",
  982. "subject": "lon",
  983. },
  984. ],
  985. "delimited_key": [
  986. {
  987. "clause": "WHERE",
  988. "expressionType": "SIMPLE",
  989. "filterOptionName": "12345678-1234-5678-1234-567812345678",
  990. "comparator": "",
  991. "operator": "IS NOT NULL",
  992. "subject": "lonlat",
  993. }
  994. ],
  995. "geohash_key": [
  996. {
  997. "clause": "WHERE",
  998. "expressionType": "SIMPLE",
  999. "filterOptionName": "12345678-1234-5678-1234-567812345678",
  1000. "comparator": "",
  1001. "operator": "IS NOT NULL",
  1002. "subject": "geo",
  1003. }
  1004. ],
  1005. }
  1006. for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
  1007. test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.copy())
  1008. test_viz_deckgl.spatial_control_keys = [mock_key]
  1009. test_viz_deckgl.add_null_filters()
  1010. adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
  1011. assert expected_results.get(mock_key) == adhoc_filters
  1012. class TimeSeriesVizTestCase(SupersetTestCase):
  1013. def test_timeseries_unicode_data(self):
  1014. datasource = self.get_datasource_mock()
  1015. form_data = {"groupby": ["name"], "metrics": ["sum__payout"]}
  1016. raw = {}
  1017. raw["name"] = [
  1018. "Real Madrid C.F.🇺🇸🇬🇧",
  1019. "Real Madrid C.F.🇺🇸🇬🇧",
  1020. "Real Madrid Basket",
  1021. "Real Madrid Basket",
  1022. ]
  1023. raw["__timestamp"] = [
  1024. "2018-02-20T00:00:00",
  1025. "2018-03-09T00:00:00",
  1026. "2018-02-20T00:00:00",
  1027. "2018-03-09T00:00:00",
  1028. ]
  1029. raw["sum__payout"] = [2, 2, 4, 4]
  1030. df = pd.DataFrame(raw)
  1031. test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
  1032. viz_data = {}
  1033. viz_data = test_viz.get_data(df)
  1034. expected = [
  1035. {
  1036. u"values": [
  1037. {u"y": 4, u"x": u"2018-02-20T00:00:00"},
  1038. {u"y": 4, u"x": u"2018-03-09T00:00:00"},
  1039. ],
  1040. u"key": (u"Real Madrid Basket",),
  1041. },
  1042. {
  1043. u"values": [
  1044. {u"y": 2, u"x": u"2018-02-20T00:00:00"},
  1045. {u"y": 2, u"x": u"2018-03-09T00:00:00"},
  1046. ],
  1047. u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
  1048. },
  1049. ]
  1050. self.assertEqual(expected, viz_data)
  1051. def test_process_data_resample(self):
  1052. datasource = self.get_datasource_mock()
  1053. df = pd.DataFrame(
  1054. {
  1055. "__timestamp": pd.to_datetime(
  1056. ["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
  1057. ),
  1058. "y": [1.0, 2.0, 5.0, 7.0],
  1059. }
  1060. )
  1061. self.assertEqual(
  1062. viz.NVD3TimeSeriesViz(
  1063. datasource,
  1064. {"metrics": ["y"], "resample_method": "sum", "resample_rule": "1D"},
  1065. )
  1066. .process_data(df)["y"]
  1067. .tolist(),
  1068. [1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
  1069. )
  1070. np.testing.assert_equal(
  1071. viz.NVD3TimeSeriesViz(
  1072. datasource,
  1073. {"metrics": ["y"], "resample_method": "asfreq", "resample_rule": "1D"},
  1074. )
  1075. .process_data(df)["y"]
  1076. .tolist(),
  1077. [1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
  1078. )