Date: Tue, 6 Dec 2016 14:31:59 +0000 (UTC)
From: "ASF GitHub Bot (JIRA)"
To: issues@flink.apache.org
Reply-To: dev@flink.apache.org
Subject: [jira] [Commented] (FLINK-5041) Implement savepoint backwards compatibility 1.1 -> 1.2

    [ https://issues.apache.org/jira/browse/FLINK-5041?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15725657#comment-15725657 ]

ASF GitHub Bot commented on FLINK-5041:
---------------------------------------

Github user StephanEwen commented on a diff in the pull request:

    https://github.com/apache/flink/pull/2781#discussion_r91083375

    --- Diff: flink-runtime/src/main/java/org/apache/flink/migration/runtime/checkpoint/TaskState.java ---
    @@ -0,0 +1,166 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.flink.migration.runtime.checkpoint;
    +
    +import org.apache.flink.migration.runtime.state.StateHandle;
    +import org.apache.flink.migration.util.SerializedValue;
    +import org.apache.flink.runtime.jobgraph.JobVertexID;
    +
    +import java.io.Serializable;
    +import java.util.Collection;
    +import java.util.HashMap;
    +import java.util.Map;
    +import java.util.Objects;
    +import java.util.Set;
    +
    +@Deprecated
    +public class TaskState implements Serializable {
    +
    +    private static final long serialVersionUID = -4845578005863201810L;
    +
    +    private final JobVertexID jobVertexID;
    +
    +    /** Map of task states which can be accessed by their sub task index */
    +    private final Map<Integer, SubtaskState> subtaskStates;
    +
    +    /** Map of key-value states which can be accessed by their key group index */
    +    private final Map<Integer, KeyGroupState> kvStates;
    +
    +    /** Parallelism of the operator when it was checkpointed */
    +    private final int parallelism;
    +
    +    public TaskState(JobVertexID jobVertexID, int parallelism) {
    +        this.jobVertexID = jobVertexID;
    +
    +        this.subtaskStates = new HashMap<>(parallelism);
    +
    +        this.kvStates = new HashMap<>();
    +
    +        this.parallelism = parallelism;
    +    }
    +
    +    public JobVertexID getJobVertexID() {
    +        return jobVertexID;
    +    }
    +
    +    public void putState(int subtaskIndex, SubtaskState subtaskState) {
    +        if (subtaskIndex < 0 || subtaskIndex >= parallelism) {
    +            throw new IndexOutOfBoundsException("The given sub task index " + subtaskIndex +
    +                " exceeds the maximum number of sub tasks " + subtaskStates.size());
    +        } else {
    +            subtaskStates.put(subtaskIndex, subtaskState);
    +        }
    +    }
    +
    +    public SubtaskState getState(int subtaskIndex) {
    +        if (subtaskIndex < 0 || subtaskIndex >= parallelism) {
    +            throw new IndexOutOfBoundsException("The given sub task index " + subtaskIndex +
    +                " exceeds the maximum number of sub tasks " + subtaskStates.size());
    +        } else {
    +            return subtaskStates.get(subtaskIndex);
    +        }
    +    }
    +
    +    public Collection<SubtaskState> getStates() {
    +        return subtaskStates.values();
    +    }
    +
    +    public long getStateSize() {
    +        long result = 0L;
    +
    +        for (SubtaskState subtaskState : subtaskStates.values()) {
    +            result += subtaskState.getStateSize();
    +        }
    +
    +        for (KeyGroupState keyGroupState : kvStates.values()) {
    +            result += keyGroupState.getStateSize();
    +        }
    +
    +        return result;
    +    }
    +
    +    public int getNumberCollectedStates() {
    +        return subtaskStates.size();
    +    }
    +
    +    public int getParallelism() {
    +        return parallelism;
    +    }
    +
    +    public void putKvState(int keyGroupId, KeyGroupState keyGroupState) {
    +        kvStates.put(keyGroupId, keyGroupState);
    +    }
    +
    +    public KeyGroupState getKvState(int keyGroupId) {
    +        return kvStates.get(keyGroupId);
    +    }
    +
    +    /**
    +     * Retrieve the set of key-value state key groups specified by the given key group partition set.
    +     * The key groups are returned as a map where the key group index maps to the serialized state
    +     * handle of the key group.
    +     *
    +     * @param keyGroupPartition Set of key group indices
    +     * @return Map of serialized key group state handles indexed by their key group index.
    +     */
    +    public Map<Integer, SerializedValue<StateHandle<Serializable>>> getUnwrappedKvStates(Set<Integer> keyGroupPartition) {
    +        HashMap<Integer, SerializedValue<StateHandle<Serializable>>> result = new HashMap<>(keyGroupPartition.size());
    +
    +        for (Integer keyGroupId : keyGroupPartition) {
    +            KeyGroupState keyGroupState = kvStates.get(keyGroupId);
    +
    +            if (keyGroupState != null) {
    +                result.put(keyGroupId, kvStates.get(keyGroupId).getKeyGroupState());
    +            }
    +        }
    +
    +        return result;
    +    }
    +
    +    public int getNumberCollectedKvStates() {
    +        return kvStates.size();
    +    }
    +
    +    public void discard(ClassLoader classLoader) throws Exception {
    +        for (SubtaskState subtaskState : subtaskStates.values()) {
    --- End diff --

    Deactivate this method (see above)


> Implement savepoint backwards compatibility 1.1 -> 1.2
> -------------------------------------------------------
>
> Key: FLINK-5041
> URL: https://issues.apache.org/jira/browse/FLINK-5041
> Project: Flink
> Issue Type: New Feature
> Components: State Backends, Checkpointing
> Affects Versions: 1.2.0
> Reporter: Stefan Richter
> Assignee: Stefan Richter
>
> This issue tracks the implementation of backwards compatibility between the Flink 1.1 and 1.2 releases.
> This task subsumes:
> - Converting old savepoints to new savepoints, including a conversion of state handles to their new replacement.
> - Converting keyed state from old backend implementations to their new counterparts.
> - Converting operator and function state for all changed operators.
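
The review suggestion above ("Deactivate this method") presumably means that the legacy migration TaskState should not try to discard state that is only read during savepoint conversion. The following is a minimal sketch of that idea, assuming "deactivate" means failing fast; it is not necessarily the change that was actually merged, and the exception message is made up here.

    // Sketch only: one possible way to deactivate discard() on the legacy
    // migration class, as suggested in the review comment above.
    public void discard(ClassLoader classLoader) throws Exception {
        throw new UnsupportedOperationException(
            "discard() is deactivated on the migration TaskState; "
                + "legacy 1.1 savepoint state is only read during conversion.");
    }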
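
The issue description lists three conversion steps. As a rough illustration of the first one (walking an old-format TaskState so its contents can be re-packaged into the new savepoint layout), here is a hedged sketch that uses only the accessors shown in the diff above; the LegacyStateVisitor callback is invented for this example and is not part of Flink.

    // Hypothetical helper for the "convert old savepoints to new savepoints" step.
    // Only TaskState#getParallelism(), #getState(int) and #getJobVertexID() from the
    // diff above are real; the visitor interface is made up for illustration.
    interface LegacyStateVisitor {
        void visit(JobVertexID vertexId, int subtaskIndex, SubtaskState subtaskState);
    }

    static void walkLegacyTaskState(TaskState legacy, LegacyStateVisitor visitor) {
        // The old layout is indexed by the parallelism recorded at checkpoint time,
        // so the conversion iterates over the sub-task slots of the 1.1 savepoint.
        for (int subtaskIndex = 0; subtaskIndex < legacy.getParallelism(); subtaskIndex++) {
            SubtaskState subtaskState = legacy.getState(subtaskIndex);
            if (subtaskState != null) {
                visitor.visit(legacy.getJobVertexID(), subtaskIndex, subtaskState);
            }
        }
    }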