= Pandas =

 * Indexed (labelled) arrays
 * DataFrame
 * DateRange
 * Indexing, slicing (a short example follows the walkthrough below)
 * Applying common numpy statistics
 * Data alignment (a second example follows the walkthrough)
 * Grouping

Precipitation data you need for the exercise: [[https://wiki.zmaw.de/lehre/PythonCourse/PythonLES/Pandas?action=AttachFile&do=get&target=precip2.tar.gz | precip.tar.gz]]

{{{#!python
import datetime

import numpy as np
import pandas as p
import Nio
import matplotlib.pyplot as plt
from matplotlib.dates import num2date, epoch2num

nc1 = Nio.open_file('10147-precip.nc')  # Hamburg
nc2 = Nio.open_file('10015-precip.nc')  # Helgoland

time1 = nc1.variables['time'][:]
time2 = nc2.variables['time'][:]
rain1 = nc1.variables['rainfall_rate_hour'][:]
rain2 = nc2.variables['rainfall_rate_hour'][:]

# plot the raw data
# plt.plot(rain1, 'g', rain2, 'b')

# timestamps should be Python datetimes
dates1 = num2date(epoch2num(time1))
dates2 = num2date(epoch2num(time2))

# indexed arrays - p.Series
ds1 = p.Series(rain1, index=dates1)
ds2 = p.Series(rain2, index=dates2)

# pandas uses the numpy.nan representation of not-a-number,
# while Nio returns masked arrays. Many basic numpy operations
# are valid for pandas Series; np.where returns a plain array,
# so wrap the result in a Series again:
ds1 = p.Series(np.where(ds1 < 0, np.nan, ds1), index=dates1)
ds2 = p.Series(np.where(ds2 < 0, np.nan, ds2), index=dates2)

# built-in plotting functions
ds1.plot()
ds2.plot()

# newer pandas versions can drop NaNs directly (dropna());
# the current one can only fill, so drop by hand
# (hint: nan is not equal to nan :)
ds1 = ds1[ds1 == ds1]
ds2 = ds2[ds2 == ds2]

# now we have series of different lengths
print ds1.shape[0], ds2.shape[0]

# to get series of equal length, reuse the index
# of one series for the other
ds2_nan = ds2.reindex(ds1.index)
ds2_backfill = ds2.reindex(ds1.index, method='backfill')

# basic stats
print "Max: %.2f Min: %.2f Mean: %.2f Median: %.2f Count: %d" % \
    (ds2.max(), ds2.min(), ds2.mean(), ds2.median(), ds2.count())

# cumulative sum
ds2.cumsum()

# DataFrame - 2D labelled arrays
df = p.DataFrame({"helgoland": ds2, "hamburg": ds1})

# series of different lengths will share the same (extended) index
print ds1.fillna(0).count(), ds2.fillna(0).count()
print df['hamburg'].fillna(0).count(), df['helgoland'].fillna(0).count()

# drop incomplete rows (dropna() in newer pandas versions)
df.dropIncompleteRows()

# correlation between the columns
df.corr()

# aggregation - sweet!
# custom date range
start = datetime.datetime(2004, 5, 1)
end = datetime.datetime(2007, 9, 1)

# create date ranges with hourly, pentad (5-day) and monthly steps
# (doesn't work on the cis servers)
# dr1Hour = p.DateRange(start, end, offset=p.datetools.Hour())
# dr5Day = p.DateRange(start, end, offset=5 * p.datetools.day)
# drMonth = p.DateRange(start, end, offset=p.datetools.monthEnd)

# group the data by month and by year
dfMonth = df.groupby(lambda x: x.month)
dfYear = df.groupby(lambda x: x.year)

dfMonthSum = dfMonth.agg(np.nansum)
dfMonthMean = dfMonth.agg(np.mean)
dfYearSum = dfYear.agg(p.Series.sum)

# access a single column
dfYearHelgoland = dfYear.agg(np.nansum)['helgoland']

# get the years where the total precipitation exceeds 800 mm
dfYearHelgoland[dfYearHelgoland > 800]

# get the raw values of the result
dfYear.agg(np.nansum).values

plt.show()
}}}
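The bullet list above promises indexing and slicing, which the walkthrough never shows explicitly. A minimal sketch, using a small hand-made Series instead of the precipitation data (the dates and values here are made up for illustration):

{{{#!python
import datetime
import pandas as p

dates = [datetime.datetime(2004, 5, d) for d in range(1, 6)]
ds = p.Series([0.0, 1.2, 3.4, 0.5, 2.1], index=dates)

# label-based lookup: a single timestamp selects one value
print ds[datetime.datetime(2004, 5, 3)]

# positional slicing works just like on a plain numpy array
print ds[1:4]

# boolean indexing: all values above 1 mm
print ds[ds > 1.0]
}}}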
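Data alignment also shows up in plain arithmetic, not only in reindex(): when two Series with different indices are combined, pandas aligns them on the union of their labels and fills the gaps with NaN. Another small made-up sketch:

{{{#!python
import pandas as p

a = p.Series([1.0, 2.0, 3.0], index=['x', 'y', 'z'])
b = p.Series([10.0, 20.0], index=['y', 'z'])

# addition aligns on the index; 'x' has no partner in b,
# so the result there is NaN
print a + b
}}}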