How to Connect to AWS Emr Notebook with Airflow


I want to connect my Airflow DAG to an EMR Notebook that is running on a cluster. So far I have successfully connected to the AWS EMR cluster, but I can't connect to the notebook. Please help.

In the code below, I load some files to an S3 bucket and then run a few steps on my cluster, which works. But I also want to run a pre-made notebook on the EMR cluster, and that is the part I cannot get working. Thanks.

from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.emr_create_job_flow_operator import (
    EmrCreateJobFlowOperator,
)
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.contrib.operators.emr_terminate_job_flow_operator import (
    EmrTerminateJobFlowOperator,
)

# Configurations
BUCKET_NAME = "as*****************"  # replace this with your bucket name
local_data = "./dags/data/movie_review.csv"
s3_data = "data/movie_review.csv"
local_script = "./dags/scripts/spark/random_text_classification.py"
s3_script = "scripts/random_text_classification.py"
s3_clean = "clean_data/"
SPARK_STEPS = [ # Note the params values are supplied to the operator
    {
        "Name": "Move raw data from S3 to HDFS",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "s3-dist-cp",
                "--src=s3://{{ params.BUCKET_NAME }}/data",
                "--dest=/movie",
            ],
        },
    },
    {
        "Name": "Classify movie reviews",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "spark-submit",
                "--deploy-mode",
                "client",
                "s3://{{ params.BUCKET_NAME }}/{{ params.s3_script }}",
            ],
        },
    },
    {
        "Name": "Move clean data from HDFS to S3",
        "ActionOnFailure": "CANCEL_AND_WAIT",
        "HadoopJarStep": {
            "Jar": "command-runner.jar",
            "Args": [
                "s3-dist-cp",
                "--src=/output",
                "--dest=s3://{{ params.BUCKET_NAME }}/{{ params.s3_clean }}",
            ],
        },
    },
]

# helper function
def _local_to_s3(filename, key, bucket_name=BUCKET_NAME):
    s3 = S3Hook()
    s3.load_file(filename=filename, bucket_name=bucket_name, replace=True, key=key)


default_args = {
    "owner": "airflow",
    "depends_on_past": True,
    "wait_for_downstream": True,
    "start_date": datetime(2020, 10, 17),
    "email": ["[email protected]"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}
dag = DAG(
    "spark_submit_airflow",
    default_args=default_args,
    schedule_interval="0 10 * * *",
    max_active_runs=1,
)

start_data_pipeline = DummyOperator(task_id="start_data_pipeline", dag=dag)

data_to_s3 = PythonOperator(
    dag=dag,
    task_id="data_to_s3",
    python_callable=_local_to_s3,
    op_kwargs={"filename": local_data, "key": s3_data,},
)
script_to_s3 = PythonOperator(
    dag=dag,
    task_id="script_to_s3",
    python_callable=_local_to_s3,
    op_kwargs={"filename": local_script, "key": s3_script,},
)

# Add your steps to the EMR cluster
step_adder = EmrAddStepsOperator(
    task_id="add_steps",
    job_flow_id="j-***********", #cluster id
    aws_conn_id="aws_default",
    steps=SPARK_STEPS,
    params={ # these params are used to fill the parameterized values in SPARK_STEPS json
        "BUCKET_NAME": BUCKET_NAME,
        "s3_data": s3_data,
        "s3_script": s3_script,
        "s3_clean": s3_clean,
    },
    dag=dag,
)
last_step = len(SPARK_STEPS) - 1
# wait for the steps to complete
step_checker = EmrStepSensor(
    task_id="watch_step",
    job_flow_id="j-*************",  # cluster ID
    step_id="{{ task_instance.xcom_pull(task_ids='add_steps', key='return_value')["
    + str(last_step)
    + "] }}",
    aws_conn_id="aws_default",
    dag=dag,
)

end_data_pipeline = DummyOperator(task_id="end_data_pipeline", dag=dag)

start_data_pipeline >> [data_to_s3, script_to_s3]  >> step_adder >> step_checker >> end_data_pipeline


CodePudding user response:

I don't think there is an EMR operator for notebooks yet.

To run a pre-made EMR notebook, you can use the boto3 EMR client's start_notebook_execution method, providing the ID and the relative path of the pre-made notebook.

Make a custom PythonOperator task that calls start_notebook_execution and use it in your pipeline. This task needs the cluster ID, which in your case is the same job flow ID ("j-...") you already pass to EmrAddStepsOperator; if you were creating the cluster with EmrCreateJobFlowOperator, you could pull it from that task's XCom instead. (Note that EmrAddStepsOperator's own XCom return value is the list of step IDs, not the cluster ID.)

import boto3

REGION = "us-east-1"  # assumption: replace with your cluster's region

def start_nb_execution(cluster_id, **context):
    # Start the pre-made notebook on the given EMR cluster
    emr = boto3.client('emr', region_name=REGION)
    start_nb = emr.start_notebook_execution(
        EditorId="YOUR_NOTEBOOK_ID",
        RelativePath="YOUR_NOTEBOOK_FILE_NAME",
        ExecutionEngine={'Id': cluster_id, 'Type': 'EMR'},
        ServiceRole='EMR_Notebooks_DefaultRole'
    )
    execution_id = start_nb['NotebookExecutionId']
    print("Started an execution: " + execution_id)
    return execution_id

Call this function from a PythonOperator:

start_nb_execution = PythonOperator(
    task_id='start_nb_execution',
    python_callable=start_nb_execution,
    provide_context=True,
    op_kwargs={"cluster_id": "j-***********"},  # the same EMR cluster (job flow) ID used above
    dag=dag,
)

Now you can add it to the pipeline:

start_data_pipeline >> [data_to_s3, script_to_s3]  >> step_adder >> step_checker >> start_nb_execution >> end_data_pipeline

There is a good tutorial here that also includes a sensor example for the notebook execution; a minimal sketch of such a sensor follows.
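For reference, here is a minimal sketch of such a sensor, assuming the same Airflow 1.10-style imports as the DAG above and boto3's describe_notebook_execution call; REGION, the task names, and the poke interval are illustrative placeholders, not values from the original post.

import boto3
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults

REGION = "us-east-1"  # assumption: use your cluster's region

class NotebookExecutionSensor(BaseSensorOperator):
    """Waits for an EMR notebook execution to reach a terminal state."""

    @apply_defaults
    def __init__(self, execution_id_task, *args, **kwargs):
        super(NotebookExecutionSensor, self).__init__(*args, **kwargs)
        self.execution_id_task = execution_id_task

    def poke(self, context):
        # The execution id was returned (and pushed to XCom) by the start task
        execution_id = context["task_instance"].xcom_pull(task_ids=self.execution_id_task)
        emr = boto3.client("emr", region_name=REGION)
        status = emr.describe_notebook_execution(
            NotebookExecutionId=execution_id
        )["NotebookExecution"]["Status"]
        self.log.info("Notebook execution %s is %s", execution_id, status)
        if status in ("FAILED", "STOPPED"):
            raise ValueError("Notebook execution ended with status " + status)
        return status == "FINISHED"

watch_nb_execution = NotebookExecutionSensor(
    task_id="watch_nb_execution",
    execution_id_task="start_nb_execution",
    poke_interval=60,
    dag=dag,
)

# and wire it in after the start task:
# start_nb_execution >> watch_nb_execution >> end_data_pipeline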
