diff --git config/server.properties config/server.properties
index b47fe94..13a9815 100644
--- config/server.properties
+++ config/server.properties
@@ -113,3 +113,10 @@ zk.connect=localhost:2181
 
 # Timeout in ms for connecting to zookeeper
 zk.connectiontimeout.ms=1000000
+
+# metrics reporter properties
+# kafka.metrics.polling.interval.secs=5
+# kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter
+# kafka.csv.metrics.dir=kafka_metrics
+# kafka.csv.metrics.reporter.enabled=true
+
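Note: the reporter keys above are shipped commented out, so the CSV reporter stays off by default. A broker that wants per-metric CSV output would uncomment them, roughly as in this sketch (values copied from the commented template above; the polling interval and directory are only illustrative):

    kafka.metrics.polling.interval.secs=5
    kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter
    kafka.csv.metrics.dir=kafka_metrics
    kafka.csv.metrics.reporter.enabled=true
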
diff --git core/lib/metrics-annotation-3.0.0-10ccc80c0.jar core/lib/metrics-annotation-3.0.0-10ccc80c0.jar
new file mode 100644
index 0000000..9f96c1a
Binary files /dev/null and core/lib/metrics-annotation-3.0.0-10ccc80c0.jar differ
diff --git core/lib/metrics-core-3.0.0-10ccc80c0.jar core/lib/metrics-core-3.0.0-10ccc80c0.jar
new file mode 100644
index 0000000..5f04089
Binary files /dev/null and core/lib/metrics-core-3.0.0-10ccc80c0.jar differ
diff --git core/src/main/scala/kafka/cluster/Partition.scala core/src/main/scala/kafka/cluster/Partition.scala
index 3aa6eab..4b20e42 100644
--- core/src/main/scala/kafka/cluster/Partition.scala
+++ core/src/main/scala/kafka/cluster/Partition.scala
@@ -50,7 +50,7 @@ class Partition(val topic: String,
   newGauge(
     topic + "-" + partitionId + "UnderReplicated",
     new Gauge[Int] {
-      def value() = {
+      def getValue = {
         if (isUnderReplicated) 1 else 0
       }
     }
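Note: this hunk and the gauge hunks below all make the same mechanical change: every anonymous Gauge registered through newGauge switches from overriding value() to overriding getValue, matching the metrics 3.0.0 snapshot jars added above. A minimal sketch of the post-patch pattern follows; the class name is hypothetical, and the import paths for Gauge and KafkaMetricsGroup are assumptions, since the hunks do not show them:

    import java.util.concurrent.LinkedBlockingQueue
    import com.yammer.metrics.core.Gauge     // assumed package for the bundled metrics snapshot
    import kafka.metrics.KafkaMetricsGroup   // assumed package for Kafka's metrics helper trait

    // Hypothetical example class, not part of this patch.
    class QueueDepthExample extends KafkaMetricsGroup {
      private val queue = new LinkedBlockingQueue[String]()

      // With the bundled 3.0.0 snapshot the abstract accessor is getValue, not value().
      newGauge(
        "ExampleQueueSize",
        new Gauge[Int] {
          def getValue = queue.size
        }
      )
    }
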
diff --git core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
index f5df1fc..886388a 100644
--- core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
+++ core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
@@ -686,7 +686,7 @@ private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
       newGauge(
         config.groupId + "-" + topicThreadId._1 + "-" + topicThreadId._2 + "-FetchQueueSize",
         new Gauge[Int] {
-          def value() = q.size
+          def getValue = q.size
         }
       )
     })
diff --git core/src/main/scala/kafka/controller/KafkaController.scala core/src/main/scala/kafka/controller/KafkaController.scala
index d43af7f..165c1d9 100644
--- core/src/main/scala/kafka/controller/KafkaController.scala
+++ core/src/main/scala/kafka/controller/KafkaController.scala
@@ -50,7 +50,7 @@ class KafkaController(val config : KafkaConfig, zkClient: ZkClient) extends Logg
   newGauge(
     "ActiveControllerCount",
     new Gauge[Int] {
-      def value() = if (isActive) 1 else 0
+      def getValue = if (isActive) 1 else 0
     }
   )
 
diff --git core/src/main/scala/kafka/log/Log.scala core/src/main/scala/kafka/log/Log.scala
index 1c01df8..a1b01e0 100644
--- core/src/main/scala/kafka/log/Log.scala
+++ core/src/main/scala/kafka/log/Log.scala
@@ -150,14 +150,14 @@ private[kafka] class Log( val dir: File, val maxLogFileSize: Long, val maxMessag
   newGauge(
     name + "-" + "NumLogSegments",
     new Gauge[Int] {
-      def value() = numberOfSegments
+      def getValue = numberOfSegments
     }
   )
 
   newGauge(
     name + "-" + "LogEndOffset",
     new Gauge[Long] {
-      def value() = logEndOffset
+      def getValue = logEndOffset
     }
   )
 
diff --git core/src/main/scala/kafka/metrics/KafkaCSVMetricsReporter.scala core/src/main/scala/kafka/metrics/KafkaCSVMetricsReporter.scala
index cfe7e34..d676b57 100644
--- core/src/main/scala/kafka/metrics/KafkaCSVMetricsReporter.scala
+++ core/src/main/scala/kafka/metrics/KafkaCSVMetricsReporter.scala
@@ -50,9 +50,10 @@ private class KafkaCSVMetricsReporter extends KafkaMetricsReporter
         if (!csvDir.exists())
           csvDir.mkdirs()
         underlying = new CsvReporter(Metrics.defaultRegistry(), csvDir)
-        if (props.getBoolean("kafka.csv.metrics.reporter.enabled", false))
+        if (props.getBoolean("kafka.csv.metrics.reporter.enabled", default = false)) {
+          initialized = true
           startReporter(metricsConfig.pollingIntervalSecs)
-        initialized = true
+        }
       }
     }
   }
diff --git core/src/main/scala/kafka/network/RequestChannel.scala core/src/main/scala/kafka/network/RequestChannel.scala
index 34caf6d..133538b 100644
--- core/src/main/scala/kafka/network/RequestChannel.scala
+++ core/src/main/scala/kafka/network/RequestChannel.scala
@@ -92,7 +92,7 @@ class RequestChannel(val numProcessors: Int, val queueSize: Int) extends KafkaMe
   newGauge(
     "RequestQueueSize",
     new Gauge[Int] {
-      def value() = requestQueue.size
+      def getValue = requestQueue.size
     }
   )
 
diff --git core/src/main/scala/kafka/producer/async/ProducerSendThread.scala core/src/main/scala/kafka/producer/async/ProducerSendThread.scala
index 8e08e12..f05bd6b 100644
--- core/src/main/scala/kafka/producer/async/ProducerSendThread.scala
+++ core/src/main/scala/kafka/producer/async/ProducerSendThread.scala
@@ -36,7 +36,7 @@ class ProducerSendThread[K,V](val threadName: String,
   newGauge(
     "ProducerQueueSize-" + getId,
     new Gauge[Int] {
-      def value() = queue.size
+      def getValue = queue.size
     }
   )
 
diff --git core/src/main/scala/kafka/server/AbstractFetcherThread.scala core/src/main/scala/kafka/server/AbstractFetcherThread.scala
index fbab2db..0dbd9f0 100644
--- core/src/main/scala/kafka/server/AbstractFetcherThread.scala
+++ core/src/main/scala/kafka/server/AbstractFetcherThread.scala
@@ -158,7 +158,7 @@ class FetcherLagMetrics(name: (String, Int)) extends KafkaMetricsGroup {
   newGauge(
     name._1 + "-" + name._2 + "-ConsumerLag",
     new Gauge[Long] {
-      def value() = lagVal.get
+      def getValue = lagVal.get
     }
   )
 
diff --git core/src/main/scala/kafka/server/ReplicaManager.scala core/src/main/scala/kafka/server/ReplicaManager.scala
index f078b99..515ba5a 100644
--- core/src/main/scala/kafka/server/ReplicaManager.scala
+++ core/src/main/scala/kafka/server/ReplicaManager.scala
@@ -47,13 +47,13 @@ class ReplicaManager(val config: KafkaConfig, time: Time, val zkClient: ZkClient
   newGauge(
     "LeaderCount",
     new Gauge[Int] {
-      def value() = leaderPartitions.size
+      def getValue = leaderPartitions.size
     }
   )
   newGauge(
     "UnderReplicatedPartitions",
     new Gauge[Int] {
-      def value() = {
+      def getValue = {
         leaderPartitionsLock synchronized {
           leaderPartitions.count(_.isUnderReplicated)
         }
diff --git core/src/main/scala/kafka/server/RequestPurgatory.scala core/src/main/scala/kafka/server/RequestPurgatory.scala
index 0aac6d1..1a3dbd3 100644
--- core/src/main/scala/kafka/server/RequestPurgatory.scala
+++ core/src/main/scala/kafka/server/RequestPurgatory.scala
@@ -69,7 +69,7 @@ abstract class RequestPurgatory[T <: DelayedRequest, R](brokerId: Int = 0) exten
   newGauge(
     "NumDelayedRequests",
     new Gauge[Int] {
-      def value() = expiredRequestReaper.unsatisfied.get()
+      def getValue = expiredRequestReaper.unsatisfied.get()
     }
   )
 
diff --git core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala
index 019e515..b6bc897 100644
--- core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala
+++ core/src/test/scala/unit/kafka/metrics/KafkaTimerTest.scala
@@ -36,20 +36,20 @@ class KafkaTimerTest extends JUnit3Suite {
     timer.time {
       clock.addMillis(1000)
     }
-    assertEquals(1, metric.count())
-    assertTrue((metric.max() - 1000).abs <= Double.Epsilon)
-    assertTrue((metric.min() - 1000).abs <= Double.Epsilon)
+    assertEquals(1, metric.getCount())
+    assertTrue((metric.getMax() - 1000).abs <= Double.Epsilon)
+    assertTrue((metric.getMin() - 1000).abs <= Double.Epsilon)
   }
 
   private class ManualClock extends Clock {
 
     private var ticksInNanos = 0L
 
-    override def tick() = {
+    override def getTick() = {
       ticksInNanos
     }
 
-    override def time() = {
+    override def getTime() = {
       TimeUnit.NANOSECONDS.toMillis(ticksInNanos)
     }
 
diff --git project/build/KafkaProject.scala project/build/KafkaProject.scala
index 230c28f..4d27d82 100644
--- project/build/KafkaProject.scala
+++ project/build/KafkaProject.scala
@@ -66,17 +66,42 @@ class KafkaProject(info: ProjectInfo) extends ParentProject(info) with IdeaProje
         <scope>compile</scope>
       </dependency>
 
+    def metricsDeps =
+      <dependencies>
+        <dependency>
+          <groupId>com.yammer.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+          <version>3.0.0-10ccc80c0</version>
+          <scope>compile</scope>
+        </dependency>
+        <dependency>
+          <groupId>com.yammer.metrics</groupId>
+          <artifactId>metrics-annotation</artifactId>
+          <version>3.0.0-10ccc80c0</version>
+          <scope>compile</scope>
+        </dependency>
+      </dependencies>
+
     object ZkClientDepAdder extends RuleTransformer(new RewriteRule() {
       override def transform(node: Node): Seq[Node] = node match {
         case Elem(prefix, "dependencies", attribs, scope, deps @ _*) => {
-          Elem(prefix, "dependencies", attribs, scope, deps ++ zkClientDep :_*)
+          Elem(prefix, "dependencies", attribs, scope, deps ++ zkClientDep:_*)
+        }
+        case other => other
+      }
+    })
+
+    object MetricsDepAdder extends RuleTransformer(new RewriteRule() {
+      override def transform(node: Node): Seq[Node] = node match {
+        case Elem(prefix, "dependencies", attribs, scope, deps @ _*) => {
+          Elem(prefix, "dependencies", attribs, scope, deps ++ metricsDeps:_*)
         }
         case other => other
       }
     })
 
     override def pomPostProcess(pom: Node): Node = {
-      ZkClientDepAdder(pom)
+      MetricsDepAdder(ZkClientDepAdder(pom))
     }
 
     override def artifactID = "kafka"
@@ -251,7 +276,6 @@ class KafkaProject(info: ProjectInfo) extends ParentProject(info) with IdeaProje
   trait CoreDependencies {
     val log4j = "log4j" % "log4j" % "1.2.15"
     val jopt = "net.sf.jopt-simple" % "jopt-simple" % "3.2"
-    val metricsCore = "com.yammer.metrics" % "metrics-core" % "latest.release"
     val slf4jSimple = "org.slf4j" % "slf4j-simple" % "latest.release"
   }
   
