Return-Path:
X-Original-To: apmail-flink-issues-archive@minotaur.apache.org
Delivered-To: apmail-flink-issues-archive@minotaur.apache.org
Received: from mail.apache.org (hermes.apache.org [140.211.11.3])
    by minotaur.apache.org (Postfix) with SMTP id 36B5418409
    for ; Tue, 26 May 2015 09:24:18 +0000 (UTC)
Received: (qmail 49731 invoked by uid 500); 26 May 2015 09:24:18 -0000
Delivered-To: apmail-flink-issues-archive@flink.apache.org
Received: (qmail 49692 invoked by uid 500); 26 May 2015 09:24:18 -0000
Mailing-List: contact issues-help@flink.apache.org; run by ezmlm
Precedence: bulk
List-Help:
List-Unsubscribe:
List-Post:
List-Id:
Reply-To: dev@flink.apache.org
Delivered-To: mailing list issues@flink.apache.org
Received: (qmail 49681 invoked by uid 99); 26 May 2015 09:24:18 -0000
Received: from arcas.apache.org (HELO arcas.apache.org) (140.211.11.28)
    by apache.org (qpsmtpd/0.29) with ESMTP; Tue, 26 May 2015 09:24:18 +0000
Date: Tue, 26 May 2015 09:24:17 +0000 (UTC)
From: "ASF GitHub Bot (JIRA)"
To: issues@flink.apache.org
Message-ID:
In-Reply-To:
References:
Subject: [jira] [Commented] (FLINK-2084) Create a dedicated streaming mode
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 7bit
X-JIRA-FingerPrint: 30527f35849b9dde25b450d4833f0394

    [ https://issues.apache.org/jira/browse/FLINK-2084?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14558913#comment-14558913 ]

ASF GitHub Bot commented on FLINK-2084:
---------------------------------------

Github user mxm commented on a diff in the pull request:

    https://github.com/apache/flink/pull/718#discussion_r31016976

--- Diff: flink-runtime/src/test/java/org/apache/flink/runtime/memory/MemoryManagerLazyAllocationTest.java ---
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.memory;
+
+import org.apache.flink.core.memory.MemorySegment;
+import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
+import org.apache.flink.runtime.memorymanager.DefaultMemoryManager;
+import org.apache.flink.runtime.memorymanager.MemoryAllocationException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.fail;
+
+/**
+ * Tests for the memory manager, in the mode where it allocates memory lazily instead of pre-allocating it.
+ */
+public class MemoryManagerLazyAllocationTest {
+
+	private static final long RANDOM_SEED = 643196033469871L;
+
+	private static final int MEMORY_SIZE = 1024 * 1024 * 72; // 72 MiBytes
+
+	private static final int PAGE_SIZE = 1024 * 32; // 32 KiBytes
+
+	private static final int NUM_PAGES = MEMORY_SIZE / PAGE_SIZE;
+
+	private DefaultMemoryManager memoryManager;
+
+	private Random random;
+
+
+	@Before
+	public void setUp() {
+		this.memoryManager = new DefaultMemoryManager(MEMORY_SIZE, 1, PAGE_SIZE, false);
+		this.random = new Random(RANDOM_SEED);
+	}
+
+	@After
+	public void tearDown() {
+		if (!this.memoryManager.verifyEmpty()) {
+			fail("Memory manager is not completely empty and valid at the end of the test.");
+		}
+		this.memoryManager = null;
+		this.random = null;
+	}
+
+	@Test
+	public void allocateAllSingle() {
+		try {
+			final AbstractInvokable mockInvoke = new DummyInvokable();
+			List<MemorySegment> segments = new ArrayList<MemorySegment>();
+
+			try {
+				for (int i = 0; i < NUM_PAGES; i++) {
+					segments.add(this.memoryManager.allocatePages(mockInvoke, 1).get(0));
+				}
+			}
+			catch (MemoryAllocationException e) {
+				fail("Unable to allocate memory");
+			}
+
+			for (MemorySegment seg : segments) {
+				this.memoryManager.release(seg);
+			}
+		}
+		catch (Exception e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		}
+	}
+
+	@Test
+	public void allocateAllMulti() {
+		try {
+			final AbstractInvokable mockInvoke = new DummyInvokable();
+			final List<MemorySegment> segments = new ArrayList<MemorySegment>();
+
+			try {
+				for (int i = 0; i < NUM_PAGES / 2; i++) {
+					segments.addAll(this.memoryManager.allocatePages(mockInvoke, 2));
+				}
+			} catch (MemoryAllocationException e) {
+				Assert.fail("Unable to allocate memory");
+			}
+
+			this.memoryManager.release(segments);
+		}
+		catch (Exception e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		}
+	}
+
+	@Test
+	public void allocateMultipleOwners() {
+		final int NUM_OWNERS = 17;
+
+		try {
+			AbstractInvokable[] owners = new AbstractInvokable[NUM_OWNERS];
+
+			@SuppressWarnings("unchecked")
+			List<MemorySegment>[] mems = (List<MemorySegment>[]) new List[NUM_OWNERS];
+
+			for (int i = 0; i < NUM_OWNERS; i++) {
+				owners[i] = new DummyInvokable();
+				mems[i] = new ArrayList<MemorySegment>(64);
+			}
+
+			// allocate all memory to the different owners
+			for (int i = 0; i < NUM_PAGES; i++) {
+				final int owner = this.random.nextInt(NUM_OWNERS);
+				mems[owner].addAll(this.memoryManager.allocatePages(owners[owner], 1));
+			}
+
+			// free one owner at a time
+			for (int i = 0; i < NUM_OWNERS; i++) {
+				this.memoryManager.releaseAll(owners[i]);
+				owners[i] = null;
+				Assert.assertTrue("Released memory segments have not been destroyed.", allMemorySegmentsFreed(mems[i]));
+				mems[i] = null;
+
+				// check that the other owners were not affected
+				for (int k = i + 1; k < NUM_OWNERS; k++) {
+					Assert.assertTrue("Non-released memory segments are accidentally destroyed.", allMemorySegmentsValid(mems[k]));
+				}
+			}
+		}
+		catch (Exception e) {
+			e.printStackTrace();
+			fail(e.getMessage());
+		}
+	}
+
+	@Test
+	public void allocateTooMuch() {
+		try {
+			final AbstractInvokable mockInvoke = new DummyInvokable();
+
+			List<MemorySegment> segs = this.memoryManager.allocatePages(mockInvoke, NUM_PAGES);
+
+			try {
+				this.memoryManager.allocatePages(mockInvoke, 1);
+				Assert.fail("Expected MemoryAllocationException.");
+			} catch (MemoryAllocationException maex) {
+				// expected
+			}
+
+			Assert.assertTrue("The previously allocated segments were not valid any more.",
+					allMemorySegmentsValid(segs));
+
--- End diff --

Formatting
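
The quoted diff is truncated, so the DummyInvokable owner and the allMemorySegmentsValid / allMemorySegmentsFreed checks it calls are not shown here. The following is only a minimal sketch of what such helpers could look like, assuming the 0.9-era APIs (MemorySegment#isFreed(), AbstractInvokable with registerInputOutput()/invoke()); the actual file in the pull request may differ.

import java.util.List;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;

// Hypothetical stand-ins for the helpers referenced by the test above.
final class MemoryTestHelpers {

	/** An owner that does nothing; the memory manager only needs it as an identity. */
	static final class DummyInvokable extends AbstractInvokable {

		@Override
		public void registerInputOutput() {}

		@Override
		public void invoke() throws Exception {}
	}

	/** Returns true if none of the given segments has been freed yet. */
	static boolean allMemorySegmentsValid(List<MemorySegment> segments) {
		for (MemorySegment seg : segments) {
			if (seg.isFreed()) {
				return false;
			}
		}
		return true;
	}

	/** Returns true if every one of the given segments has been freed. */
	static boolean allMemorySegmentsFreed(List<MemorySegment> segments) {
		for (MemorySegment seg : segments) {
			if (!seg.isFreed()) {
				return false;
			}
		}
		return true;
	}
}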
> Create a dedicated streaming mode
> ---------------------------------
>
>                 Key: FLINK-2084
>                 URL: https://issues.apache.org/jira/browse/FLINK-2084
>             Project: Flink
>          Issue Type: Improvement
>          Components: Distributed Runtime
>    Affects Versions: 0.9
>            Reporter: Stephan Ewen
>            Assignee: Stephan Ewen
>             Fix For: 0.9
>
>
> As per discussion on the mailing list
> http://mail-archives.apache.org/mod_mbox/flink-dev/201505.mbox/browser
>
> - We add a dedicated streaming mode for now. The streaming mode supersedes the batch mode, so it can run both types of programs.
> - The streaming mode sets the memory manager to "lazy allocation".
>   -> So long as it runs pure streaming jobs, the full heap will be available to window buffers and UDFs.
>   -> Batch programs can still run, so mixed workloads are not prevented. Batch programs are a bit less robust there, because the memory manager does not pre-allocate memory, so UDFs can eat into Flink's memory portion.
> - The streaming mode starts the necessary configured components/services for state backups.
>
> Over the next versions, we want to bring these things together:
> - use the managed memory for window buffers
> - start the state backend on demand
>
> Then we deprecate the streaming mode and let both modes start the cluster in the same way.



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
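
The "lazy allocation" behaviour described in the issue can be illustrated with the DefaultMemoryManager calls that appear in the quoted test. This is only a sketch under assumptions taken from that test (the constructor signature with a preAllocateMemory flag and an AbstractInvokable owner); it is not the actual streaming-mode wiring.

import java.util.List;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
import org.apache.flink.runtime.memorymanager.DefaultMemoryManager;
import org.apache.flink.runtime.memorymanager.MemoryAllocationException;

public class LazyAllocationSketch {

	public static void main(String[] args) throws MemoryAllocationException {
		final int pageSize = 32 * 1024;            // 32 KiB pages
		final long memorySize = 64 * 1024 * 1024;  // 64 MiB managed memory budget

		// preAllocateMemory = false: the manager claims heap only when pages are
		// actually requested, so a pure streaming job leaves the heap to window
		// buffers and UDFs; with 'true' the full budget is reserved up front.
		DefaultMemoryManager memoryManager =
				new DefaultMemoryManager(memorySize, 1, pageSize, false);

		// a stand-in owner; in the runtime the owners are the actual tasks
		AbstractInvokable owner = new AbstractInvokable() {
			@Override
			public void registerInputOutput() {}

			@Override
			public void invoke() {}
		};

		// memory is backed only at this point, page by page
		List<MemorySegment> pages = memoryManager.allocatePages(owner, 16);
		System.out.println("allocated " + pages.size() + " pages of " + pageSize + " bytes");

		// releasing everything owned by the task returns the pages to the pool
		memoryManager.releaseAll(owner);
		System.out.println("manager empty again: " + memoryManager.verifyEmpty());
	}
}

With preAllocateMemory set to true, the same allocatePages/releaseAll calls work, but the memory is reserved at construction time, which is the robustness trade-off for batch programs that the issue describes.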