Add an introduction with some more handholding and mention NaN
polars/11_missing_data.py (+274 -38) CHANGED
|
@@ -20,7 +20,190 @@ def _(mo):
|
|
| 20 |
|
| 21 |
_by [etrotta](https://github.com/etrotta)_
|
| 22 |
|
| 23 |
-
This notebook covers some common problems you may face when dealing with real datasets and techniques used to solve deal with them,
|
| 24 |
"""
|
| 25 |
)
|
| 26 |
return
|
|
@@ -30,7 +213,9 @@ def _(mo):
|
|
| 30 |
def _(mo):
|
| 31 |
mo.md(
|
| 32 |
r"""
|
| 33 |
-
|
| 34 |
|
| 35 |
- Contains multiple stations covering the Municipality of Rio de Janeiro
|
| 36 |
- Measures the precipitation as millimeters, with a granularity of 15 minutes
|
|
@@ -133,7 +318,7 @@ def _(dirty_weather, mo, rain):
|
|
| 133 |
_missing_count = dirty_weather.select(rain.is_null().sum()).item()
|
| 134 |
|
| 135 |
mo.md(
|
| 136 |
-
f"As you can see, there are {_missing_count:,} rows missing the accumulated rain for a period.\n\nThat could be
|
| 137 |
)
|
| 138 |
return
|
| 139 |
|
|
@@ -160,7 +345,7 @@ def _(mo):
|
|
| 160 |
|
| 161 |
### Last option for fixing it: Acquire the correct values from elsewhere.
|
| 162 |
|
| 163 |
-
|
| 164 |
|
| 165 |
### However
|
| 166 |
|
|
@@ -207,6 +392,8 @@ def _(mo):
|
|
| 207 |
|
| 208 |
That difference could be caused by the same factors as null values, or even by someone dropping null values along the way. For the purposes of this notebook, let's say that we want to have values for every combination with no exceptions, so we'll have to make reasonable assumptions to interpolate and extrapolate them.
|
| 209 |
|
| 210 |
Given that we are working with time series data, we will [upsample](https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.upsample.html) the data, but you could also create a DataFrame containing all expected rows and then use `join(how="...")`.
|
| 211 |
|
| 212 |
However, that will give us _even more_ null values, so we will want to fill them in afterwards. For this case, we will just use a forward fill followed by a backward fill.
|
|
@@ -245,52 +432,63 @@ def _(dirty_weather, mo, pl, rain):
|
|
| 245 |
|
| 246 |
@app.cell(hide_code=True)
|
| 247 |
def _(mo):
|
| 248 |
-
mo.md(
|
| 249 |
return
|
| 250 |
|
| 251 |
|
| 252 |
@app.cell(hide_code=True)
|
| 253 |
def _(mo):
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
)
|
| 260 |
|
| 261 |
-
mo.
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
interval,
|
| 267 |
-
]
|
| 268 |
-
)
|
| 269 |
-
return day_slider, hour_slider, interval, year_picker
|
| 270 |
|
| 271 |
|
| 272 |
@app.cell
|
| 273 |
-
def _(
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
pl,
|
| 278 |
-
rain,
|
| 279 |
-
stations,
|
| 280 |
-
weather,
|
| 281 |
-
year_picker,
|
| 282 |
-
):
|
| 283 |
-
_range_seconds = map(lambda hour: hour * 3600, hour_slider.value)
|
| 284 |
_df_seconds = pl.col("datetime").dt.hour().mul(3600) + pl.col("datetime").dt.minute().mul(60)
|
| 285 |
|
| 286 |
animation_data = (
|
| 287 |
weather.lazy()
|
| 288 |
.filter(
|
| 289 |
-
pl.col("datetime").dt.year() ==
|
| 290 |
-
pl.col("datetime").dt.ordinal_day().is_between(*
|
| 291 |
_df_seconds.is_between(*_range_seconds),
|
| 292 |
)
|
| 293 |
-
.group_by_dynamic("datetime", group_by="station", every=
|
| 294 |
.agg(rain.sum().alias("precipitation"))
|
| 295 |
.remove(pl.col("precipitation").eq(0).all().over("station"))
|
| 296 |
.join(stations.lazy(), on="station")
|
|
@@ -369,13 +567,13 @@ def _(mo):
|
|
| 369 |
def _(mo):
|
| 370 |
mo.md(
|
| 371 |
r"""
|
| 372 |
-
|
| 373 |
|
| 374 |
-
|
| 375 |
|
| 376 |
The original dataset contained naive datetimes instead of timezone-aware ones, but we can infer whether it refers to UTC or local time (in this case, UTC-03:00) based on the measurements.
|
| 377 |
|
| 378 |
-
For example, we can select one specific interval during which we know rained a lot, or graph the average amount of precipitation for each hour of the day, then compare the data timestamps with a ground truth.
|
| 379 |
"""
|
| 380 |
)
|
| 381 |
return
|
|
@@ -438,7 +636,7 @@ def _(mo):
|
|
| 438 |
r"""
|
| 439 |
By externally researching the expected distribution and looking up some of the extreme weather events, we can come to a conclusion about whether it is aligned with local time or with UTC.
|
| 440 |
|
| 441 |
-
In this case, the distribution matches the normal weather for this region and we can see that the hours with the most precipitation match those of historical events, so it is safe to say it is using Americas/São Paulo time zone.
|
| 442 |
"""
|
| 443 |
)
|
| 444 |
return
|
|
@@ -460,7 +658,45 @@ def _(dirty_weather_naive, pl):
|
|
| 460 |
def _(mo):
|
| 461 |
mo.md(
|
| 462 |
r"""
|
| 463 |
-
|
| 464 |
|
| 465 |
Loading data and imports
|
| 466 |
"""
|
|
|
|
| 20 |
|
| 21 |
_by [etrotta](https://github.com/etrotta)_
|
| 22 |
|
| 23 |
+
This notebook covers some common problems you may face when dealing with real datasets and the techniques used to deal with them, showcasing polars functionality for handling missing data.
|
| 24 |
+
|
| 25 |
+
First we provide an overview of the methods available in polars, then we walk through a mini case study with real-world data showing how to use them, and lastly we provide some additional information in the 'Bonus Content' section.
|
| 26 |
+
You can skip around to each header using the menu on the right side.
|
| 27 |
+
"""
|
| 28 |
+
)
|
| 29 |
+
return
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@app.cell(hide_code=True)
|
| 33 |
+
def _(mo):
|
| 34 |
+
mo.md(
|
| 35 |
+
r"""
|
| 36 |
+
## Methods for working with Nulls
|
| 37 |
+
|
| 38 |
+
We'll be using the following DataFrame to show the most important methods:
|
| 39 |
+
"""
|
| 40 |
+
)
|
| 41 |
+
return
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@app.cell(hide_code=True)
|
| 45 |
+
def _(pl):
|
| 46 |
+
df = pl.DataFrame(
|
| 47 |
+
[
|
| 48 |
+
{"species": "Dog", "name": "Millie", "height": None, "age": 4},
|
| 49 |
+
{"species": "Dog", "name": "Wally", "height": 60, "age": None},
|
| 50 |
+
{"species": "Dog", "name": None, "height": 50, "age": 12},
|
| 51 |
+
{"species": "Cat", "name": "Mini", "height": 15, "age": None},
|
| 52 |
+
{"species": "Cat", "name": None, "height": 25, "age": 6},
|
| 53 |
+
{"species": "Cat", "name": "Kazusa", "height": None, "age": 16},
|
| 54 |
+
]
|
| 55 |
+
)
|
| 56 |
+
df
|
| 57 |
+
return (df,)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@app.cell(hide_code=True)
|
| 61 |
+
def _(mo):
|
| 62 |
+
mo.md(
|
| 63 |
+
r"""
|
| 64 |
+
### Counting nulls
|
| 65 |
+
|
| 66 |
+
A simple yet convenient aggregation.
|
| 67 |
+
"""
|
| 68 |
+
)
|
| 69 |
+
return
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@app.cell
|
| 73 |
+
def _(df):
|
| 74 |
+
df.null_count()
|
| 75 |
+
return
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@app.cell(hide_code=True)
|
| 79 |
+
def _(mo):
|
| 80 |
+
mo.md(
|
| 81 |
+
r"""
|
| 82 |
+
### Dropping Nulls
|
| 83 |
+
|
| 84 |
+
The simplest way of dealing with null values is throwing them away, but that is not always a good idea.
|
| 85 |
+
"""
|
| 86 |
+
)
|
| 87 |
+
return
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@app.cell
|
| 91 |
+
def _(df):
|
| 92 |
+
df.drop_nulls()
|
| 93 |
+
return
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@app.cell
|
| 97 |
+
def _(df):
|
| 98 |
+
df.drop_nulls(subset="name")
|
| 99 |
+
return
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
@app.cell
|
| 103 |
+
def _(mo):
|
| 104 |
+
mo.md(
|
| 105 |
+
r"""
|
| 106 |
+
### Filtering null values
|
| 107 |
+
|
| 108 |
+
To filter in polars, you'll typically use the `df.filter(expression)` or `df.remove(expression)` methods.
|
| 109 |
+
|
| 110 |
+
Filter will only keep rows in which the expression evaluates to True.
|
| 111 |
+
It will remove not only rows in which it evaluates to False, but also those in which the expression evaluates to None.
|
| 112 |
+
|
| 113 |
+
Remove will only remove rows in which the expression evaluates to True.
|
| 114 |
+
It will keep rows in which it evaluates to None.
|
| 115 |
+
"""
|
| 116 |
+
)
|
| 117 |
+
return
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@app.cell
|
| 121 |
+
def _(df, pl):
|
| 122 |
+
df.filter(pl.col("age") > 10)
|
| 123 |
+
return
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@app.cell
|
| 127 |
+
def _(df, pl):
|
| 128 |
+
df.remove(pl.col("age") < 10)
|
| 129 |
+
return
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@app.cell(hide_code=True)
|
| 133 |
+
def _(mo):
|
| 134 |
+
mo.md(
|
| 135 |
+
r"""
|
| 136 |
+
You may also be tempted to use `== None` or `!= None`, but comparison operators in polars will generally pass null values through.
|
| 137 |
+
|
| 138 |
+
You can use the `.eq_missing()` or `.ne_missing()` methods if you want to be strict about it, and there are also `.is_null()` and `.is_not_null()` methods.
|
| 139 |
+
"""
|
| 140 |
+
)
|
| 141 |
+
return
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@app.cell
|
| 145 |
+
def _(df, pl):
|
| 146 |
+
df.select(
|
| 147 |
+
"name",
|
| 148 |
+
(pl.col("name") == None).alias("Name equals None"),
|
| 149 |
+
(pl.col("name") == "Mini").alias("Name equals Mini"),
|
| 150 |
+
(pl.col("name").eq_missing("Mini")).alias("Name eq_missing Mini"),
|
| 151 |
+
(pl.col("name").is_null()).alias("Name is null"),
|
| 152 |
+
(pl.col("name").is_not_null()).alias("Name is not null"),
|
| 153 |
+
)
|
| 154 |
+
return
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@app.cell(hide_code=True)
|
| 158 |
+
def _(mo):
|
| 159 |
+
mo.md(
|
| 160 |
+
r"""
|
| 161 |
+
### Filling Null values
|
| 162 |
+
|
| 163 |
+
You can also fill in the values with constants, calculations or by consulting external data sources.
|
| 164 |
+
|
| 165 |
+
Be careful not to treat estimated or guessed values as if they were the ground truth, however, otherwise you may end up drawing conclusions about a reality that does not exist.
|
| 166 |
+
"""
|
| 167 |
+
)
|
| 168 |
+
return
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
@app.cell
|
| 172 |
+
def _(df, mo, pl):
|
| 173 |
+
guesstimates = df.with_columns(
|
| 174 |
+
pl.col("height").fill_null(pl.col("height").mean().over("species")),
|
| 175 |
+
pl.col("age").fill_null(0),
|
| 176 |
+
)
|
| 177 |
+
guesstimates = mo.ui.data_editor(
|
| 178 |
+
guesstimates,
|
| 179 |
+
editable_columns=["name"],
|
| 180 |
+
label="Let's guess some values to fill in nulls, then try giving names to the animals with `null` by editing the cells",
|
| 181 |
+
)
|
| 182 |
+
guesstimates
|
| 183 |
+
return (guesstimates,)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@app.cell
|
| 187 |
+
def _(guesstimates):
|
| 188 |
+
guesstimates.value
|
| 189 |
+
return
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
@app.cell(hide_code=True)
|
| 193 |
+
def _(mo):
|
| 194 |
+
mo.md(
|
| 195 |
+
r"""
|
| 196 |
+
### TL;DR
|
| 197 |
+
|
| 198 |
+
Before we head into the mini case study, a brief review of what we have covered (a compact recap sketch follows after this list):
|
| 199 |
+
|
| 200 |
+
- use `df.null_count()` or `expr.is_null()` to count and identify missing values
|
| 201 |
+
- you could just drop rows with values missing in any column, or a subset of columns, with `df.drop_nulls()`, but in most cases you'll want to be more careful about it
|
| 202 |
+
- take into consideration whether you want to preserve null values or remove them when choosing between `df.filter()` and `df.remove()`
|
| 203 |
+
- if you don't want to propagate null values, use `_missing` variations of methods such as `eq` vs `eq_missing`
|
| 204 |
+
- you may want to fill in missing values based on calculations via `fill_null`, or manually edit the data based on external documents
|
| 205 |
+
|
| 206 |
+
Whichever approach you take, remember to document how you handled them!
|
| 207 |
"""
|
| 208 |
)
|
| 209 |
return
|
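For quick reference, a compact sketch that strings the recapped methods together on the small animals frame (`df`) defined earlier in the notebook; the outputs are whatever that frame produces:

```python
import polars as pl

df.null_count()                               # nulls per column
df.drop_nulls(subset="name")                  # drop rows missing a name
df.filter(pl.col("age") > 10)                 # also drops rows where the comparison is null
df.remove(pl.col("age") > 10)                 # keeps rows where the comparison is null
df.select(pl.col("name").eq_missing("Mini"))  # strict equality, does not propagate nulls
df.with_columns(pl.col("age").fill_null(0))   # fill nulls with a constant
```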
|
|
|
| 213 |
def _(mo):
|
| 214 |
mo.md(
|
| 215 |
r"""
|
| 216 |
+
# Mini Case Study
|
| 217 |
+
|
| 218 |
+
We will be using a dataset from `alertario` about the weather in Rio de Janeiro, originally available in Google BigQuery under `datario.clima_pluviometro`. What you need to know about it:
|
| 219 |
|
| 220 |
- Contains multiple stations covering the Municipality of Rio de Janeiro
|
| 221 |
- Measures the precipitation as millimeters, with a granularity of 15 minutes
|
|
|
|
| 318 |
_missing_count = dirty_weather.select(rain.is_null().sum()).item()
|
| 319 |
|
| 320 |
mo.md(
|
| 321 |
+
f"As you can see, there are {_missing_count:,} rows missing the accumulated rain for a period.\n\nThat could be caused by sensor malfunctions, maintenance, bobby tables or a myriad of other reasons. While it may be a small percentage of the data ({_missing_count / len(dirty_weather):.3%}), it is still important to take it in consideration, one way or the other."
|
| 322 |
)
|
| 323 |
return
|
| 324 |
|
|
|
|
| 345 |
|
| 346 |
### Last option for fixing it: Acquire the correct values from elsewhere.
|
| 347 |
|
| 348 |
+
Much like manually adding names to the animals in the introduction; you could try finding approximate values in another dataset or, in some cases, manually input the correct values.
|
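A minimal sketch of that idea, reusing the animals `df` from the introduction and a hypothetical `corrections` table (its values are made up for illustration):

```python
import polars as pl

# Corrections obtained from another source; this only fills nulls, never overwrites.
corrections = pl.DataFrame({"name": ["Millie"], "height": [45]})

patched = (
    df.join(corrections, on="name", how="left", suffix="_corrected")
    .with_columns(pl.col("height").fill_null(pl.col("height_corrected")))
    .drop("height_corrected")
)
```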
| 349 |
|
| 350 |
### However
|
| 351 |
|
|
|
|
| 392 |
|
| 393 |
That difference could be caused by the same factors as null values, or even by someone dropping null values along the way. For the purposes of this notebook, let's say that we want to have values for every combination with no exceptions, so we'll have to make reasonable assumptions to interpolate and extrapolate them.
|
| 394 |
|
| 395 |
+
### Upsampling
|
| 396 |
+
|
| 397 |
Given that we are working with time series data, we will [upsample](https://docs.pola.rs/api/python/stable/reference/dataframe/api/polars.DataFrame.upsample.html) the data, but you could also create a DataFrame containing all expected rows and then use `join(how="...")`.
|
| 398 |
|
| 399 |
However, that will give us _even more_ null values, so we will want to fill them in afterwards. For this case, we will just use a forward fill followed by a backward fill.
|
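A minimal sketch of the upsample-and-fill step, assuming a recent polars version and columns named `station`, `datetime` (15-minute granularity) and `rain`; the notebook's actual column names may differ:

```python
import polars as pl

filled = (
    dirty_weather
    .sort("station", "datetime")
    .upsample(time_column="datetime", every="15m", group_by="station")
    .with_columns(
        pl.col("station").forward_fill(),               # rows added by upsample have a null key
        pl.col("rain").forward_fill().backward_fill(),  # forward fill, then backward fill any leading gap
    )
)
```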
|
|
|
| 432 |
|
| 433 |
@app.cell(hide_code=True)
|
| 434 |
def _(mo):
|
| 435 |
+
mo.md(
|
| 436 |
+
r"""
|
| 437 |
+
Now that we finally have a clean dataset, let's play around with it a little.
|
| 438 |
+
|
| 439 |
+
### Example App
|
| 440 |
+
|
| 441 |
+
Let's display the amount of precipitation each station measured within a timeframe, aggregated to a lower granularity.
|
| 442 |
+
|
| 443 |
+
First we'll filter by day
|
| 444 |
+
"""
|
| 445 |
+
)
|
| 446 |
return
|
| 447 |
|
| 448 |
|
| 449 |
@app.cell(hide_code=True)
|
| 450 |
def _(mo):
|
| 451 |
+
filters = (
|
| 452 |
+
mo.md(
|
| 453 |
+
"""Filters for the example
|
| 454 |
+
|
| 455 |
+
Year: {year}
|
| 456 |
+
Days of the year: {day}
|
| 457 |
+
Hours of each day: {hour}
|
| 458 |
+
Aggregation granularity: {interval}
|
| 459 |
+
"""
|
| 460 |
+
)
|
| 461 |
+
.batch(
|
| 462 |
+
year=mo.ui.dropdown([2020, 2021, 2022], value=2022),
|
| 463 |
+
day=mo.ui.range_slider(1, 365, show_value=True, full_width=True, value=[87, 94]),
|
| 464 |
+
hour=mo.ui.range_slider(0, 24, 0.25, show_value=True, full_width=True),
|
| 465 |
+
interval=mo.ui.dropdown(["15m", "30m", "1h", "2h", "4h", "6h", "1d", "7d", "30d"], value="4h"),
|
| 466 |
+
)
|
| 467 |
+
.form()
|
| 468 |
)
|
| 469 |
|
| 470 |
+
# Note: You could use `mo.ui.date_range` instead, but I just don't like it myself
|
| 471 |
+
# mo.ui.date_range(start="2020-01-01", stop="2022-12-31", value=["2022-03-28", "2022-04-03"], label="Display range")
|
| 472 |
+
|
| 473 |
+
filters
|
| 474 |
+
return (filters,)
|
| 475 |
|
| 476 |
|
| 477 |
@app.cell
|
| 478 |
+
def _(filters, mo, pl, rain, stations, weather):
|
| 479 |
+
mo.stop(filters.value is None)
|
| 480 |
+
|
| 481 |
+
_range_seconds = map(lambda hour: hour * 3600, filters.value["hour"])
|
| 482 |
_df_seconds = pl.col("datetime").dt.hour().mul(3600) + pl.col("datetime").dt.minute().mul(60)
|
| 483 |
|
| 484 |
animation_data = (
|
| 485 |
weather.lazy()
|
| 486 |
.filter(
|
| 487 |
+
pl.col("datetime").dt.year() == filters.value["year"],
|
| 488 |
+
pl.col("datetime").dt.ordinal_day().is_between(*filters.value["day"]),
|
| 489 |
_df_seconds.is_between(*_range_seconds),
|
| 490 |
)
|
| 491 |
+
.group_by_dynamic("datetime", group_by="station", every=filters.value["interval"])
|
| 492 |
.agg(rain.sum().alias("precipitation"))
|
| 493 |
.remove(pl.col("precipitation").eq(0).all().over("station"))
|
| 494 |
.join(stations.lazy(), on="station")
|
|
|
|
| 567 |
def _(mo):
|
| 568 |
mo.md(
|
| 569 |
r"""
|
| 570 |
+
# Bonus Content
|
| 571 |
|
| 572 |
+
## Appendix A: Missing Time Zones
|
| 573 |
|
| 574 |
The original dataset contained naive datetimes instead of timezone-aware ones, but we can infer whether it refers to UTC or local time (in this case, UTC-03:00) based on the measurements.
|
| 575 |
|
| 576 |
+
For example, we can select one specific interval during which we know it rained a lot, or graph the average amount of precipitation for each hour of the day, then compare the data timestamps with a ground truth.
|
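A minimal sketch of the hour-of-day comparison, assuming `dirty_weather_naive` has a naive `datetime` column and a `rain` column (those column names are assumptions):

```python
import polars as pl

# Average precipitation per hour of the day, taking the naive timestamps as-is...
hourly_naive = (
    dirty_weather_naive
    .group_by(pl.col("datetime").dt.hour().alias("hour"))
    .agg(pl.col("rain").mean().alias("avg_rain"))
    .sort("hour")
)

# ...and the same profile if the naive timestamps are interpreted as UTC and converted
# to local time. Whichever profile peaks where we expect rain (late-afternoon storms,
# known extreme events) tells us which interpretation fits.
hourly_local = (
    dirty_weather_naive
    .with_columns(
        pl.col("datetime")
        .dt.replace_time_zone("UTC")
        .dt.convert_time_zone("America/Sao_Paulo")
    )
    .group_by(pl.col("datetime").dt.hour().alias("hour"))
    .agg(pl.col("rain").mean().alias("avg_rain"))
    .sort("hour")
)
```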
| 577 |
"""
|
| 578 |
)
|
| 579 |
return
|
|
|
|
| 636 |
r"""
|
| 637 |
By externally researching the expected distribution and looking up some of the extreme weather events, we can come to a conclusion about whether it is aligned with local time or with UTC.
|
| 638 |
|
| 639 |
+
In this case, the distribution matches the normal weather for this region and we can see that the hours with the most precipitation match those of historical events, so it is safe to say it is using local time (equivalent to the America/Sao_Paulo time zone).
|
| 640 |
"""
|
| 641 |
)
|
| 642 |
return
|
|
|
|
| 658 |
def _(mo):
|
| 659 |
mo.md(
|
| 660 |
r"""
|
| 661 |
+
## Appendix B: Not a Number
|
| 662 |
+
|
| 663 |
+
While some other tools without proper support for missing values, as well as datasets made for them, may use `NaN` to indicate that a value is missing, in polars it is treated exclusively as a float value, much like `0.0`, `1.0` or `infinity`.
|
| 664 |
+
|
| 665 |
+
You can use `.fill_null(float('nan'))` if you need to convert to a format such tools accept, or use `.fill_nan(None)` if you are importing data from them, assuming that there are no values which really are supposed to be the float NaN.
|
| 666 |
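A minimal sketch of both conversions on a toy Series:

```python
import polars as pl

s = pl.Series("x", [1.0, None, float("nan")])

s.fill_null(float("nan"))  # null -> NaN, e.g. before handing data to a NaN-based tool
s.fill_nan(None)           # NaN -> null, e.g. right after importing from such a tool
s.is_null()                # [False, True, False]; NaN is not considered missing
```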
+
|
| 667 |
+
Remember that many calculations can result in NaN, for example dividing zero by zero:
|
| 668 |
+
"""
|
| 669 |
+
)
|
| 670 |
+
return
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
@app.cell
|
| 674 |
+
def _(dirty_weather, pl, rain):
|
| 675 |
+
day_perc = dirty_weather.select(
|
| 676 |
+
"datetime",
|
| 677 |
+
(rain / rain.sum().over("station", pl.col("datetime").dt.date())).alias("percentage_of_day_precipitation"),
|
| 678 |
+
)
|
| 679 |
+
perc_col = pl.col("percentage_of_day_precipitation")
|
| 680 |
+
|
| 681 |
+
day_perc
|
| 682 |
+
return day_perc, perc_col
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
@app.cell(hide_code=True)
|
| 686 |
+
def _(day_perc, mo, perc_col):
|
| 687 |
+
mo.md(f"""It is null for {day_perc.select(perc_col.is_null().mean()).item():.4%} of the rows, but is NaN for {day_perc.select(perc_col.is_nan().mean()).item():.4%} of them.
|
| 688 |
+
If we use the cleaned weather dataframe instead of `dirty_weather` for this calculation, we will have no nulls, but note how this calculation can end up with both, each having a different meaning.
|
| 689 |
+
|
| 690 |
+
In this case it makes sense to fill in NaNs as 0 to indicate there was no rain during that period, but treating the nulls the same could lead to a different interpretation of the data, so remember to handle NaNs and nulls separately.
|
| 691 |
+
""")
|
| 692 |
+
return
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
@app.cell(hide_code=True)
|
| 696 |
+
def _(mo):
|
| 697 |
+
mo.md(
|
| 698 |
+
r"""
|
| 699 |
+
## Utilities
|
| 700 |
|
| 701 |
Loading data and imports
|
| 702 |
"""
|