Problems when reading DataFrame with pyspark in Apache Zeppelin: very unclear exception on missing s


First, load the Python libraries needed to work with PySpark and the bokeh library:

%spark.pyspark

import bkzep
import numpy as np
from bokeh.io import output_notebook, show
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.layouts import gridplot
from pyspark.sql.functions import col, coalesce, lit, monotonically_increasing_id
from pyspark.sql import DataFrame
from pyspark.sql.functions import *

output_notebook(notebook_type='zeppelin')

Then prepare the DataFrame:

%pyspark

from pyspark.sql.functions import *

def plot_summaries(sensor, dfName):
    df = DataFrame(z.get(dfName), sqlContext)
    pdf = df.toPandas()
        #.select("*") \
        #.orderBy("index") \
        #.limit(1000) \
        #.withColumn("id", col("index")) \
        

    #sample = pdf.sample(50)
    source = ColumnDataSource(pdf)
    #print(pdf)

    TOOLTIPS = [    
        ("month", "@month"),        
        ("day", "@day"),    
        ("hour", "@hour"),
        ("min", "@{min}{0.3f}"),
        ("avg", "@{avg}{0.3f}"),
        ("max", "@{max}{0.3f}"),
        ("median", "@{median}{0.3f}"),
        ("stddev", "@{stddev}{0.3f}"),    
    ]

    TOOLTIPS2 = [    
        ("month", "@month"),
        ("day", "@day"),    
        ("count", "@{count}{0.3f}"),    
    ]

    fig = figure(title="Hourly summaries of '{}'".format(sensor),  tooltips=TOOLTIPS)
    #fig.line(x='id', y='avg', source=source, color="orange")
    #fig.line(x='id', y='min', source=source, color="green")
    #fig.line(x='id', y='max', source=source, color="red")
    fig.line(x='id', y='median', source=source, color="blue")
    #fig.line(x='id', y='stddev', source=source, color="aquamarine")

    #fig2 = figure(title="Hourly summaries of '{}' counters".format(sensor),  tooltips=TOOLTIPS2)
    #fig2.line(x='id', y='count', source=source, color="orange")

    show(gridplot([fig], ncols=1, plot_width=1000, plot_height=400))
    #show(fig)

sensors = [
    "Water_Level_Sensor_stddev",
"Water_Level_Sensor_mean"
]

and then call the function to get the bokeh plot:

%pyspark

from pyspark.sql.functions import *

keyCol = "month_day_hour"

#for sensor in sensors:
plot_summaries("Water_Level_Sensor_stddev", "pivoted")    

Then I get the exception below:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-118-bda5385b9d44> in <module>
      4 
      5 #for sensor in sensors:
----> 6 plot_summaries("Water_Level_Sensor_stddev", "resultIndexed")

<ipython-input-106-d6669aca8991> in plot_summaries(sensor, dfName)
      3 def plot_summaries(sensor, dfName):
      4     df = DataFrame(z.get(dfName), sqlContext)
----> 5     pdf = df.toPandas()
      6         #.select("*") \
      7         #.orderBy("index") \

/spark/python/lib/pyspark.zip/pyspark/sql/pandas/conversion.py in toPandas(self)
    136 
    137         # Below is toPandas without Arrow optimization.
--> 138         pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
    139         column_counter = Counter(self.columns)
    140 

/spark/python/lib/pyspark.zip/pyspark/sql/dataframe.py in collect(self)
    594         """
    595         with SCCallSiteSync(self._sc) as css:
--> 596             sock_info = self._jdf.collectToPython()
    597         return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
    598 

AttributeError: 'NoneType' object has no attribute 'collectToPython'
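
For reference, a quick hypothetical check (assuming the same notebook context) shows what z.get() actually hands back before it is wrapped in the DataFrame constructor:

%pyspark

# Hypothetical diagnostic: inspect the object returned by z.get() before
# wrapping it. An object put by a Scala paragraph is not a PySpark DataFrame;
# on the Python side it may come back as a py4j reference, or as None if the
# name is unknown to this interpreter.
obj = z.get("pivoted")
print(type(obj))

If this does not print <class 'pyspark.sql.dataframe.DataFrame'>, the DataFrame(obj, sqlContext) wrapper has nothing valid to delegate collectToPython() to.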

CodePudding user response:

The reason is that "pivoted" was a Scala DataFrame created in an earlier paragraph. I was wrong to assume it could be used directly, without serializing the Scala DataFrame somewhere and reading it back as a PySpark DataFrame. The object does seem to get picked up by z.get(), but it has a wrong, unexpected type that PySpark cannot process, which is why toPandas() fails with the 'NoneType' error above.
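
A minimal sketch of one way to share the data across interpreters instead, assuming %spark and %pyspark run in the same interpreter group and therefore share one SparkSession: in the Scala paragraph, register the DataFrame as a temp view with pivoted.createOrReplaceTempView("pivoted"), then read it back on the Python side as a native PySpark DataFrame:

%pyspark

# Read the view registered by the Scala paragraph back as a real
# PySpark DataFrame; no z.get() or manual DataFrame(...) wrapping needed.
df = spark.table("pivoted")
pdf = df.toPandas()

With that, plot_summaries() can take the view name and call spark.table() instead of the DataFrame(z.get(dfName), sqlContext) constructor.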
